Diffstat (limited to 'lib/CodeGen/CGAtomic.cpp')
-rw-r--r-- lib/CodeGen/CGAtomic.cpp | 962
1 file changed, 750 insertions(+), 212 deletions(-)
diff --git a/lib/CodeGen/CGAtomic.cpp b/lib/CodeGen/CGAtomic.cpp
index daac174c8e0c..da82249fe114 100644
--- a/lib/CodeGen/CGAtomic.cpp
+++ b/lib/CodeGen/CGAtomic.cpp
@@ -13,6 +13,7 @@
#include "CodeGenFunction.h"
#include "CGCall.h"
+#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
@@ -36,34 +37,94 @@ namespace {
CharUnits LValueAlign;
TypeEvaluationKind EvaluationKind;
bool UseLibcall;
+ LValue LVal;
+ CGBitFieldInfo BFI;
public:
- AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
- assert(lvalue.isSimple());
-
- AtomicTy = lvalue.getType();
- ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
- EvaluationKind = CGF.getEvaluationKind(ValueTy);
-
+ AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
+ : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
+ EvaluationKind(TEK_Scalar), UseLibcall(true) {
+ assert(!lvalue.isGlobalReg());
ASTContext &C = CGF.getContext();
-
- uint64_t ValueAlignInBits;
- uint64_t AtomicAlignInBits;
- TypeInfo ValueTI = C.getTypeInfo(ValueTy);
- ValueSizeInBits = ValueTI.Width;
- ValueAlignInBits = ValueTI.Align;
-
- TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
- AtomicSizeInBits = AtomicTI.Width;
- AtomicAlignInBits = AtomicTI.Align;
-
- assert(ValueSizeInBits <= AtomicSizeInBits);
- assert(ValueAlignInBits <= AtomicAlignInBits);
-
- AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
- ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
- if (lvalue.getAlignment().isZero())
- lvalue.setAlignment(AtomicAlign);
-
+ if (lvalue.isSimple()) {
+ AtomicTy = lvalue.getType();
+ if (auto *ATy = AtomicTy->getAs<AtomicType>())
+ ValueTy = ATy->getValueType();
+ else
+ ValueTy = AtomicTy;
+ EvaluationKind = CGF.getEvaluationKind(ValueTy);
+
+ uint64_t ValueAlignInBits;
+ uint64_t AtomicAlignInBits;
+ TypeInfo ValueTI = C.getTypeInfo(ValueTy);
+ ValueSizeInBits = ValueTI.Width;
+ ValueAlignInBits = ValueTI.Align;
+
+ TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
+ AtomicSizeInBits = AtomicTI.Width;
+ AtomicAlignInBits = AtomicTI.Align;
+
+ assert(ValueSizeInBits <= AtomicSizeInBits);
+ assert(ValueAlignInBits <= AtomicAlignInBits);
+
+ AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
+ ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
+ if (lvalue.getAlignment().isZero())
+ lvalue.setAlignment(AtomicAlign);
+
+ LVal = lvalue;
+ } else if (lvalue.isBitField()) {
+ ValueTy = lvalue.getType();
+ ValueSizeInBits = C.getTypeSize(ValueTy);
+ auto &OrigBFI = lvalue.getBitFieldInfo();
+ auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
+ AtomicSizeInBits = C.toBits(
+ C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
+ .RoundUpToAlignment(lvalue.getAlignment()));
+ auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldAddr());
+ auto OffsetInChars =
+ (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
+ lvalue.getAlignment();
+ VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
+ VoidPtrAddr, OffsetInChars.getQuantity());
+ auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ VoidPtrAddr,
+ CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
+ "atomic_bitfield_base");
+ BFI = OrigBFI;
+ BFI.Offset = Offset;
+ BFI.StorageSize = AtomicSizeInBits;
+ LVal = LValue::MakeBitfield(Addr, BFI, lvalue.getType(),
+ lvalue.getAlignment());
+ LVal.setTBAAInfo(lvalue.getTBAAInfo());
+ AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
+ if (AtomicTy.isNull()) {
+ llvm::APInt Size(
+ /*numBits=*/32,
+ C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
+ AtomicTy = C.getConstantArrayType(C.CharTy, Size, ArrayType::Normal,
+ /*IndexTypeQuals=*/0);
+ }
+ AtomicAlign = ValueAlign = lvalue.getAlignment();
+ } else if (lvalue.isVectorElt()) {
+ ValueTy = lvalue.getType()->getAs<VectorType>()->getElementType();
+ ValueSizeInBits = C.getTypeSize(ValueTy);
+ AtomicTy = lvalue.getType();
+ AtomicSizeInBits = C.getTypeSize(AtomicTy);
+ AtomicAlign = ValueAlign = lvalue.getAlignment();
+ LVal = lvalue;
+ } else {
+ assert(lvalue.isExtVectorElt());
+ ValueTy = lvalue.getType();
+ ValueSizeInBits = C.getTypeSize(ValueTy);
+ AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
+ lvalue.getType(), lvalue.getExtVectorAddr()
+ ->getType()
+ ->getPointerElementType()
+ ->getVectorNumElements());
+ AtomicSizeInBits = C.getTypeSize(AtomicTy);
+ AtomicAlign = ValueAlign = lvalue.getAlignment();
+ LVal = lvalue;
+ }
UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
}
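
For illustration, the new non-simple branches correspond to source-level
lvalues like the ones below (a sketch; '#pragma omp atomic' is one assumed
client of these paths, and all names are illustrative):

  struct S { int BF : 7; };                           // bit-field lvalue
  typedef int v4si __attribute__((vector_size(16)));  // vector type

  void update(S *s, v4si *v) {
  #pragma omp atomic
    s->BF += 1;    // reaches the lvalue.isBitField() branch
  #pragma omp atomic
    (*v)[0] += 1;  // reaches the lvalue.isVectorElt() branch
  }
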
@@ -76,6 +137,17 @@ namespace {
uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
bool shouldUseLibcall() const { return UseLibcall; }
+ const LValue &getAtomicLValue() const { return LVal; }
+ llvm::Value *getAtomicAddress() const {
+ if (LVal.isSimple())
+ return LVal.getAddress();
+ else if (LVal.isBitField())
+ return LVal.getBitFieldAddr();
+ else if (LVal.isVectorElt())
+ return LVal.getVectorAddr();
+ assert(LVal.isExtVectorElt());
+ return LVal.getExtVectorAddr();
+ }
/// Is the atomic size larger than the underlying value type?
///
@@ -87,7 +159,7 @@ namespace {
return (ValueSizeInBits != AtomicSizeInBits);
}
- bool emitMemSetZeroIfNecessary(LValue dest) const;
+ bool emitMemSetZeroIfNecessary() const;
llvm::Value *getAtomicSizeValue() const {
CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
@@ -99,37 +171,141 @@ namespace {
llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;
/// Turn an atomic-layout object into an r-value.
- RValue convertTempToRValue(llvm::Value *addr,
- AggValueSlot resultSlot,
- SourceLocation loc) const;
+ RValue convertTempToRValue(llvm::Value *addr, AggValueSlot resultSlot,
+ SourceLocation loc, bool AsValue) const;
  /// \brief Converts an rvalue to an integer value.
llvm::Value *convertRValueToInt(RValue RVal) const;
- RValue convertIntToValue(llvm::Value *IntVal, AggValueSlot ResultSlot,
- SourceLocation Loc) const;
+ RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
+ AggValueSlot ResultSlot,
+ SourceLocation Loc, bool AsValue) const;
/// Copy an atomic r-value into atomic-layout memory.
- void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;
+ void emitCopyIntoMemory(RValue rvalue) const;
/// Project an l-value down to the value field.
- LValue projectValue(LValue lvalue) const {
- llvm::Value *addr = lvalue.getAddress();
+ LValue projectValue() const {
+ assert(LVal.isSimple());
+ llvm::Value *addr = getAtomicAddress();
if (hasPadding())
- addr = CGF.Builder.CreateStructGEP(addr, 0);
+ addr = CGF.Builder.CreateStructGEP(nullptr, addr, 0);
- return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
- CGF.getContext(), lvalue.getTBAAInfo());
+ return LValue::MakeAddr(addr, getValueType(), LVal.getAlignment(),
+ CGF.getContext(), LVal.getTBAAInfo());
}
+ /// \brief Emits atomic load.
+ /// \returns Loaded value.
+ RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
+ bool AsValue, llvm::AtomicOrdering AO,
+ bool IsVolatile);
+
+ /// \brief Emits atomic compare-and-exchange sequence.
+ /// \param Expected Expected value.
+ /// \param Desired Desired value.
+  /// \param Success Atomic ordering used on success.
+  /// \param Failure Atomic ordering used on failure.
+  /// \param IsWeak true if the atomic operation is weak, false otherwise.
+  /// \returns Pair of the previous value from storage (of the value type)
+  /// and a boolean flag (i1 type) that is true on success, false otherwise.
+ std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
+ RValue Expected, RValue Desired,
+ llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
+ llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
+ bool IsWeak = false);
+
+ /// \brief Emits atomic update.
+ /// \param AO Atomic ordering.
+ /// \param UpdateOp Update operation for the current lvalue.
+ void EmitAtomicUpdate(llvm::AtomicOrdering AO,
+ const llvm::function_ref<RValue(RValue)> &UpdateOp,
+ bool IsVolatile);
+ /// \brief Emits atomic update.
+ /// \param AO Atomic ordering.
+ void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
+ bool IsVolatile);
+
/// Materialize an atomic r-value in atomic-layout memory.
llvm::Value *materializeRValue(RValue rvalue) const;
+ /// \brief Translates LLVM atomic ordering to GNU atomic ordering for
+ /// libcalls.
+ static AtomicExpr::AtomicOrderingKind
+ translateAtomicOrdering(const llvm::AtomicOrdering AO);
+
private:
bool requiresMemSetZero(llvm::Type *type) const;
+
+ /// \brief Creates temp alloca for intermediate operations on atomic value.
+ llvm::Value *CreateTempAlloca() const;
+
+ /// \brief Emits atomic load as a libcall.
+  void EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
+ llvm::AtomicOrdering AO, bool IsVolatile);
+ /// \brief Emits atomic load as LLVM instruction.
+ llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
+ /// \brief Emits atomic compare-and-exchange op as a libcall.
+ llvm::Value *EmitAtomicCompareExchangeLibcall(
+ llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
+ llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
+ llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent);
+ /// \brief Emits atomic compare-and-exchange op as LLVM instruction.
+ std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
+ llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
+ llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
+ llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
+ bool IsWeak = false);
+ /// \brief Emit atomic update as libcalls.
+ void
+ EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
+ const llvm::function_ref<RValue(RValue)> &UpdateOp,
+ bool IsVolatile);
+ /// \brief Emit atomic update as LLVM instructions.
+ void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
+ const llvm::function_ref<RValue(RValue)> &UpdateOp,
+ bool IsVolatile);
+ /// \brief Emit atomic update as libcalls.
+ void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
+ bool IsVolatile);
+ /// \brief Emit atomic update as LLVM instructions.
+  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
+ bool IsVolatile);
};
}
+AtomicExpr::AtomicOrderingKind
+AtomicInfo::translateAtomicOrdering(const llvm::AtomicOrdering AO) {
+ switch (AO) {
+ case llvm::Unordered:
+ case llvm::NotAtomic:
+ case llvm::Monotonic:
+ return AtomicExpr::AO_ABI_memory_order_relaxed;
+ case llvm::Acquire:
+ return AtomicExpr::AO_ABI_memory_order_acquire;
+ case llvm::Release:
+ return AtomicExpr::AO_ABI_memory_order_release;
+ case llvm::AcquireRelease:
+ return AtomicExpr::AO_ABI_memory_order_acq_rel;
+ case llvm::SequentiallyConsistent:
+ return AtomicExpr::AO_ABI_memory_order_seq_cst;
+ }
+ llvm_unreachable("Unhandled AtomicOrdering");
+}
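
These AO_ABI_* values follow the GNU __ATOMIC_*/C11 memory_order encoding
that the libatomic entry points expect in their 'int order' argument;
numerically (assuming the standard GNU atomic ABI):

  enum GNUAtomicOrder {  // value passed to the __atomic_* libcalls
    Relaxed = 0, Consume = 1, Acquire = 2,
    Release = 3, AcqRel = 4, SeqCst = 5
  };
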
+
+llvm::Value *AtomicInfo::CreateTempAlloca() const {
+ auto *TempAlloca = CGF.CreateMemTemp(
+ (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
+ : AtomicTy,
+ "atomic-temp");
+ TempAlloca->setAlignment(getAtomicAlignment().getQuantity());
+ // Cast to pointer to value type for bitfields.
+ if (LVal.isBitField())
+ return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ TempAlloca, getAtomicAddress()->getType());
+ return TempAlloca;
+}
+
static RValue emitAtomicLibcall(CodeGenFunction &CGF,
StringRef fnName,
QualType resultType,
@@ -172,14 +348,16 @@ bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
llvm_unreachable("bad evaluation kind");
}
-bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
- llvm::Value *addr = dest.getAddress();
+bool AtomicInfo::emitMemSetZeroIfNecessary() const {
+ assert(LVal.isSimple());
+ llvm::Value *addr = LVal.getAddress();
if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
return false;
- CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
- AtomicSizeInBits / 8,
- dest.getAlignment().getQuantity());
+ CGF.Builder.CreateMemSet(
+ addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
+ CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
+ LVal.getAlignment().getQuantity());
return true;
}
@@ -901,29 +1079,53 @@ llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
AggValueSlot resultSlot,
- SourceLocation loc) const {
- if (EvaluationKind == TEK_Aggregate)
- return resultSlot.asRValue();
-
- // Drill into the padding structure if we have one.
- if (hasPadding())
- addr = CGF.Builder.CreateStructGEP(addr, 0);
-
- // Otherwise, just convert the temporary to an r-value using the
- // normal conversion routine.
- return CGF.convertTempToRValue(addr, getValueType(), loc);
+ SourceLocation loc, bool AsValue) const {
+ if (LVal.isSimple()) {
+ if (EvaluationKind == TEK_Aggregate)
+ return resultSlot.asRValue();
+
+ // Drill into the padding structure if we have one.
+ if (hasPadding())
+ addr = CGF.Builder.CreateStructGEP(nullptr, addr, 0);
+
+ // Otherwise, just convert the temporary to an r-value using the
+ // normal conversion routine.
+ return CGF.convertTempToRValue(addr, getValueType(), loc);
+ }
+ if (!AsValue)
+    // Get an RValue from temp memory as atomic for non-simple lvalues.
+ return RValue::get(
+ CGF.Builder.CreateAlignedLoad(addr, AtomicAlign.getQuantity()));
+ if (LVal.isBitField())
+ return CGF.EmitLoadOfBitfieldLValue(LValue::MakeBitfield(
+ addr, LVal.getBitFieldInfo(), LVal.getType(), LVal.getAlignment()));
+ if (LVal.isVectorElt())
+ return CGF.EmitLoadOfLValue(LValue::MakeVectorElt(addr, LVal.getVectorIdx(),
+ LVal.getType(),
+ LVal.getAlignment()),
+ loc);
+ assert(LVal.isExtVectorElt());
+ return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
+ addr, LVal.getExtVectorElts(), LVal.getType(), LVal.getAlignment()));
}
-RValue AtomicInfo::convertIntToValue(llvm::Value *IntVal,
- AggValueSlot ResultSlot,
- SourceLocation Loc) const {
+RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
+ AggValueSlot ResultSlot,
+ SourceLocation Loc,
+ bool AsValue) const {
  // Try not to go through memory in some easy cases.
assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
- if (getEvaluationKind() == TEK_Scalar && !hasPadding()) {
- auto *ValTy = CGF.ConvertTypeForMem(ValueTy);
+ if (getEvaluationKind() == TEK_Scalar &&
+ (((!LVal.isBitField() ||
+ LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
+ !hasPadding()) ||
+ !AsValue)) {
+ auto *ValTy = AsValue
+ ? CGF.ConvertTypeForMem(ValueTy)
+ : getAtomicAddress()->getType()->getPointerElementType();
if (ValTy->isIntegerTy()) {
assert(IntVal->getType() == ValTy && "Different integer types.");
- return RValue::get(IntVal);
+ return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
} else if (ValTy->isPointerTy())
return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
@@ -935,13 +1137,13 @@ RValue AtomicInfo::convertIntToValue(llvm::Value *IntVal,
llvm::Value *Temp;
bool TempIsVolatile = false;
CharUnits TempAlignment;
- if (getEvaluationKind() == TEK_Aggregate) {
+ if (AsValue && getEvaluationKind() == TEK_Aggregate) {
assert(!ResultSlot.isIgnored());
Temp = ResultSlot.getAddr();
TempAlignment = getValueAlignment();
TempIsVolatile = ResultSlot.isVolatile();
} else {
- Temp = CGF.CreateMemTemp(getAtomicType(), "atomic-temp");
+ Temp = CreateTempAlloca();
TempAlignment = getAtomicAlignment();
}
@@ -950,93 +1152,146 @@ RValue AtomicInfo::convertIntToValue(llvm::Value *IntVal,
CGF.Builder.CreateAlignedStore(IntVal, CastTemp, TempAlignment.getQuantity())
->setVolatile(TempIsVolatile);
- return convertTempToRValue(Temp, ResultSlot, Loc);
+ return convertTempToRValue(Temp, ResultSlot, Loc, AsValue);
}
-/// Emit a load from an l-value of atomic type. Note that the r-value
-/// we produce is an r-value of the atomic *value* type.
-RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
- AggValueSlot resultSlot) {
- AtomicInfo atomics(*this, src);
+void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
+ llvm::AtomicOrdering AO, bool) {
+ // void __atomic_load(size_t size, void *mem, void *return, int order);
+ CallArgList Args;
+ Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
+ Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicAddress())),
+ CGF.getContext().VoidPtrTy);
+  Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddrForLoaded)),
+ CGF.getContext().VoidPtrTy);
+ Args.add(RValue::get(
+ llvm::ConstantInt::get(CGF.IntTy, translateAtomicOrdering(AO))),
+ CGF.getContext().IntTy);
+ emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
+}
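
This path is taken when the object has no lock-free support (too large or
insufficiently aligned). A sketch of a caller that lowers to the same
__atomic_load libcall (type and names are illustrative):

  struct Big { char Bytes[32]; };  // no lock-free atomics at this size

  Big loadBig(Big *P) {
    Big Tmp;
    __atomic_load(P, &Tmp, __ATOMIC_SEQ_CST);  // emits the generic libcall
    return Tmp;
  }
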
- // Check whether we should use a library call.
- if (atomics.shouldUseLibcall()) {
- llvm::Value *tempAddr;
- if (!resultSlot.isIgnored()) {
- assert(atomics.getEvaluationKind() == TEK_Aggregate);
- tempAddr = resultSlot.getAddr();
- } else {
- tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
- }
+llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
+ bool IsVolatile) {
+ // Okay, we're doing this natively.
+ llvm::Value *Addr = emitCastToAtomicIntPointer(getAtomicAddress());
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
+ Load->setAtomic(AO);
- // void __atomic_load(size_t size, void *mem, void *return, int order);
- CallArgList args;
- args.add(RValue::get(atomics.getAtomicSizeValue()),
- getContext().getSizeType());
- args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
- getContext().VoidPtrTy);
- args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
- getContext().VoidPtrTy);
- args.add(RValue::get(llvm::ConstantInt::get(
- IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
- getContext().IntTy);
- emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);
+ // Other decoration.
+ Load->setAlignment(getAtomicAlignment().getQuantity());
+ if (IsVolatile)
+ Load->setVolatile(true);
+ if (LVal.getTBAAInfo())
+ CGF.CGM.DecorateInstruction(Load, LVal.getTBAAInfo());
+ return Load;
+}
+
+/// An LValue is a candidate for having its loads and stores be made atomic if
+/// we are operating under /volatile:ms *and* the LValue itself is volatile and
+/// such an operation can be performed without a libcall.
+bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
+ AtomicInfo AI(*this, LV);
+ bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
+ // An atomic is inline if we don't need to use a libcall.
+ bool AtomicIsInline = !AI.shouldUseLibcall();
+ return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
+}
- // Produce the r-value.
- return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
+/// A type is a candidate for having its loads and stores be made atomic if
+/// we are operating under /volatile:ms *and* we know the access is volatile and
+/// such an operation can be performed without a libcall.
+bool CodeGenFunction::typeIsSuitableForInlineAtomic(QualType Ty,
+ bool IsVolatile) const {
+ // An atomic is inline if we don't need to use a libcall (e.g. it is builtin).
+ bool AtomicIsInline = getContext().getTargetInfo().hasBuiltinAtomic(
+ getContext().getTypeSize(Ty), getContext().getTypeAlign(Ty));
+ return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
+}
+
+RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
+ AggValueSlot Slot) {
+ llvm::AtomicOrdering AO;
+ bool IsVolatile = LV.isVolatileQualified();
+ if (LV.getType()->isAtomicType()) {
+ AO = llvm::SequentiallyConsistent;
+ } else {
+ AO = llvm::Acquire;
+ IsVolatile = true;
}
+ return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
+}
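
A sketch of the two cases this wrapper distinguishes (assuming clang-cl's
/volatile:ms semantics for the non-atomic case; declarations illustrative):

  _Atomic(int) A;  // atomic type: load is SequentiallyConsistent
  volatile int V;  // MS-volatile: load is Acquire, IsVolatile forced true

  int readA() { return A; }  // takes the isAtomicType() branch
  int readV() { return V; }  // takes the else branch under /volatile:ms
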
- // Okay, we're doing this natively.
- llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
- llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
- load->setAtomic(llvm::SequentiallyConsistent);
+RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
+ bool AsValue, llvm::AtomicOrdering AO,
+ bool IsVolatile) {
+ // Check whether we should use a library call.
+ if (shouldUseLibcall()) {
+ llvm::Value *TempAddr;
+ if (LVal.isSimple() && !ResultSlot.isIgnored()) {
+ assert(getEvaluationKind() == TEK_Aggregate);
+ TempAddr = ResultSlot.getAddr();
+ } else
+ TempAddr = CreateTempAlloca();
+
+ EmitAtomicLoadLibcall(TempAddr, AO, IsVolatile);
+
+ // Okay, turn that back into the original value or whole atomic (for
+ // non-simple lvalues) type.
+ return convertTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
+ }
- // Other decoration.
- load->setAlignment(src.getAlignment().getQuantity());
- if (src.isVolatileQualified())
- load->setVolatile(true);
- if (src.getTBAAInfo())
- CGM.DecorateInstruction(load, src.getTBAAInfo());
+ // Okay, we're doing this natively.
+ auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
// If we're ignoring an aggregate return, don't do anything.
- if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
+ if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
return RValue::getAggregate(nullptr, false);
- // Okay, turn that back into the original value type.
- return atomics.convertIntToValue(load, resultSlot, loc);
+ // Okay, turn that back into the original value or atomic (for non-simple
+ // lvalues) type.
+ return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}
-
+/// Emit a load from an l-value of atomic type. Note that the r-value
+/// we produce is an r-value of the atomic *value* type.
+RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
+ llvm::AtomicOrdering AO, bool IsVolatile,
+ AggValueSlot resultSlot) {
+ AtomicInfo Atomics(*this, src);
+ return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
+ IsVolatile);
+}
/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
-void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
+void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
+ assert(LVal.isSimple());
// If we have an r-value, the rvalue should be of the atomic type,
// which means that the caller is responsible for having zeroed
// any padding. Just do an aggregate copy of that type.
if (rvalue.isAggregate()) {
- CGF.EmitAggregateCopy(dest.getAddress(),
+ CGF.EmitAggregateCopy(getAtomicAddress(),
rvalue.getAggregateAddr(),
getAtomicType(),
(rvalue.isVolatileQualified()
- || dest.isVolatileQualified()),
- dest.getAlignment());
+ || LVal.isVolatileQualified()),
+ LVal.getAlignment());
return;
}
// Okay, otherwise we're copying stuff.
// Zero out the buffer if necessary.
- emitMemSetZeroIfNecessary(dest);
+ emitMemSetZeroIfNecessary();
// Drill past the padding if present.
- dest = projectValue(dest);
+ LValue TempLVal = projectValue();
// Okay, store the rvalue in.
if (rvalue.isScalar()) {
- CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
+ CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
} else {
- CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
+ CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
}
}
@@ -1050,22 +1305,24 @@ llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
return rvalue.getAggregateAddr();
// Otherwise, make a temporary and materialize into it.
- llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
- LValue tempLV = CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment());
- emitCopyIntoMemory(rvalue, tempLV);
- return temp;
+ LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType(),
+ getAtomicAlignment());
+ AtomicInfo Atomics(CGF, TempLV);
+ Atomics.emitCopyIntoMemory(rvalue);
+ return TempLV.getAddress();
}
llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
// If we've got a scalar value of the right size, try to avoid going
// through memory.
- if (RVal.isScalar() && !hasPadding()) {
+ if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
llvm::Value *Value = RVal.getScalarVal();
if (isa<llvm::IntegerType>(Value->getType()))
- return Value;
+ return CGF.EmitToMemory(Value, ValueTy);
else {
- llvm::IntegerType *InputIntTy =
- llvm::IntegerType::get(CGF.getLLVMContext(), getValueSizeInBits());
+ llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
+ CGF.getLLVMContext(),
+ LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
if (isa<llvm::PointerType>(Value->getType()))
return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
@@ -1082,12 +1339,324 @@ llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
getAtomicAlignment().getQuantity());
}
+std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
+ llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
+ llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
+ // Do the atomic store.
+ auto *Addr = emitCastToAtomicIntPointer(getAtomicAddress());
+ auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal,
+ Success, Failure);
+ // Other decoration.
+ Inst->setVolatile(LVal.isVolatileQualified());
+ Inst->setWeak(IsWeak);
+
+ // Okay, turn that back into the original value type.
+ auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
+ auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
+ return std::make_pair(PreviousVal, SuccessFailureVal);
+}
+
+llvm::Value *
+AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
+ llvm::Value *DesiredAddr,
+ llvm::AtomicOrdering Success,
+ llvm::AtomicOrdering Failure) {
+ // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
+ // void *desired, int success, int failure);
+ CallArgList Args;
+ Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
+ Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicAddress())),
+ CGF.getContext().VoidPtrTy);
+ Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
+ CGF.getContext().VoidPtrTy);
+ Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
+ CGF.getContext().VoidPtrTy);
+ Args.add(RValue::get(llvm::ConstantInt::get(
+ CGF.IntTy, translateAtomicOrdering(Success))),
+ CGF.getContext().IntTy);
+ Args.add(RValue::get(llvm::ConstantInt::get(
+ CGF.IntTy, translateAtomicOrdering(Failure))),
+ CGF.getContext().IntTy);
+ auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
+ CGF.getContext().BoolTy, Args);
+
+ return SuccessFailureRVal.getScalarVal();
+}
+
+std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
+ RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
+ llvm::AtomicOrdering Failure, bool IsWeak) {
+ if (Failure >= Success)
+ // Don't assert on undefined behavior.
+ Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
+
+ // Check whether we should use a library call.
+ if (shouldUseLibcall()) {
+ // Produce a source address.
+ auto *ExpectedAddr = materializeRValue(Expected);
+ auto *DesiredAddr = materializeRValue(Desired);
+ auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr,
+ Success, Failure);
+ return std::make_pair(
+ convertTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
+ SourceLocation(), /*AsValue=*/false),
+ Res);
+ }
+
+ // If we've got a scalar value of the right size, try to avoid going
+ // through memory.
+ auto *ExpectedVal = convertRValueToInt(Expected);
+ auto *DesiredVal = convertRValueToInt(Desired);
+ auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
+ Failure, IsWeak);
+ return std::make_pair(
+ ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
+ SourceLocation(), /*AsValue=*/false),
+ Res.second);
+}
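
The initial clamp enforces LLVM's rule that the failure ordering may be no
stronger than the success ordering (and may not be a release ordering). The
same rule restated over std::memory_order (a sketch, mirroring
llvm::AtomicCmpXchgInst::getStrongestFailureOrdering):

  #include <atomic>

  std::memory_order strongestFailure(std::memory_order Success) {
    switch (Success) {
    case std::memory_order_release: return std::memory_order_relaxed;
    case std::memory_order_acq_rel: return std::memory_order_acquire;
    default:                        return Success;
    }
  }
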
+
+static void
+EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
+ const llvm::function_ref<RValue(RValue)> &UpdateOp,
+ llvm::Value *DesiredAddr) {
+ llvm::Value *Ptr = nullptr;
+ LValue UpdateLVal;
+ RValue UpRVal;
+ LValue AtomicLVal = Atomics.getAtomicLValue();
+ LValue DesiredLVal;
+ if (AtomicLVal.isSimple()) {
+ UpRVal = OldRVal;
+ DesiredLVal =
+ LValue::MakeAddr(DesiredAddr, AtomicLVal.getType(),
+ AtomicLVal.getAlignment(), CGF.CGM.getContext());
+ } else {
+    // Build a new lvalue for the temp address.
+ Ptr = Atomics.materializeRValue(OldRVal);
+ if (AtomicLVal.isBitField()) {
+ UpdateLVal =
+ LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
+ AtomicLVal.getType(), AtomicLVal.getAlignment());
+ DesiredLVal =
+ LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
+ AtomicLVal.getType(), AtomicLVal.getAlignment());
+ } else if (AtomicLVal.isVectorElt()) {
+ UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
+ AtomicLVal.getType(),
+ AtomicLVal.getAlignment());
+ DesiredLVal = LValue::MakeVectorElt(
+ DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
+ AtomicLVal.getAlignment());
+ } else {
+ assert(AtomicLVal.isExtVectorElt());
+ UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
+ AtomicLVal.getType(),
+ AtomicLVal.getAlignment());
+ DesiredLVal = LValue::MakeExtVectorElt(
+ DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
+ AtomicLVal.getAlignment());
+ }
+ UpdateLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
+ DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
+ UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
+ }
+  // Store the new value in the corresponding memory area.
+ RValue NewRVal = UpdateOp(UpRVal);
+ if (NewRVal.isScalar()) {
+ CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
+ } else {
+ assert(NewRVal.isComplex());
+ CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
+ /*isInit=*/false);
+ }
+}
+
+void AtomicInfo::EmitAtomicUpdateLibcall(
+ llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
+ bool IsVolatile) {
+ auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
+
+ llvm::Value *ExpectedAddr = CreateTempAlloca();
+
+ EmitAtomicLoadLibcall(ExpectedAddr, AO, IsVolatile);
+ auto *ContBB = CGF.createBasicBlock("atomic_cont");
+ auto *ExitBB = CGF.createBasicBlock("atomic_exit");
+ CGF.EmitBlock(ContBB);
+ auto *DesiredAddr = CreateTempAlloca();
+ if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
+ requiresMemSetZero(
+ getAtomicAddress()->getType()->getPointerElementType())) {
+ auto *OldVal = CGF.Builder.CreateAlignedLoad(
+ ExpectedAddr, getAtomicAlignment().getQuantity());
+ CGF.Builder.CreateAlignedStore(OldVal, DesiredAddr,
+ getAtomicAlignment().getQuantity());
+ }
+ auto OldRVal = convertTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
+ SourceLocation(), /*AsValue=*/false);
+ EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
+ auto *Res =
+ EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, AO, Failure);
+ CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
+ CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
+}
+
+void AtomicInfo::EmitAtomicUpdateOp(
+ llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
+ bool IsVolatile) {
+ auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
+
+ // Do the atomic load.
+ auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
+  // For non-simple lvalues, perform a compare-and-swap loop.
+ auto *ContBB = CGF.createBasicBlock("atomic_cont");
+ auto *ExitBB = CGF.createBasicBlock("atomic_exit");
+ auto *CurBB = CGF.Builder.GetInsertBlock();
+ CGF.EmitBlock(ContBB);
+ llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
+ /*NumReservedValues=*/2);
+ PHI->addIncoming(OldVal, CurBB);
+ auto *NewAtomicAddr = CreateTempAlloca();
+ auto *NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
+ if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
+ requiresMemSetZero(
+ getAtomicAddress()->getType()->getPointerElementType())) {
+ CGF.Builder.CreateAlignedStore(PHI, NewAtomicIntAddr,
+ getAtomicAlignment().getQuantity());
+ }
+ auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
+ SourceLocation(), /*AsValue=*/false);
+ EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
+ auto *DesiredVal = CGF.Builder.CreateAlignedLoad(
+ NewAtomicIntAddr, getAtomicAlignment().getQuantity());
+  // Try to write the new value using a cmpxchg operation.
+ auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
+ PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
+ CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
+ CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
+}
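
The emitted control flow is the usual load/compare-exchange retry loop. Its
shape, restated over std::atomic (a sketch; the real code additionally
round-trips through the padded integer representation and uses
getStrongestFailureOrdering(AO) as the failure ordering):

  #include <atomic>

  template <typename T, typename Fn>
  void atomicUpdate(std::atomic<T> &Obj, Fn UpdateOp, std::memory_order AO) {
    T Old = Obj.load(AO);                      // EmitAtomicLoadOp
    while (!Obj.compare_exchange_strong(       // EmitAtomicCompareExchangeOp
        Old, UpdateOp(Old), AO, std::memory_order_relaxed)) {
      // On failure, Old is refreshed with the current value, playing the
      // role of the PHI node feeding the next 'atomic_cont' iteration.
    }
  }
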
+
+static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
+ RValue UpdateRVal, llvm::Value *DesiredAddr) {
+ LValue AtomicLVal = Atomics.getAtomicLValue();
+ LValue DesiredLVal;
+  // Build a new lvalue for the temp address.
+ if (AtomicLVal.isBitField()) {
+ DesiredLVal =
+ LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
+ AtomicLVal.getType(), AtomicLVal.getAlignment());
+ } else if (AtomicLVal.isVectorElt()) {
+ DesiredLVal =
+ LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
+ AtomicLVal.getType(), AtomicLVal.getAlignment());
+ } else {
+ assert(AtomicLVal.isExtVectorElt());
+ DesiredLVal = LValue::MakeExtVectorElt(
+ DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
+ AtomicLVal.getAlignment());
+ }
+ DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
+  // Store the new value in the corresponding memory area.
+ assert(UpdateRVal.isScalar());
+ CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
+}
+
+void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
+ RValue UpdateRVal, bool IsVolatile) {
+ auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
+
+ llvm::Value *ExpectedAddr = CreateTempAlloca();
+
+ EmitAtomicLoadLibcall(ExpectedAddr, AO, IsVolatile);
+ auto *ContBB = CGF.createBasicBlock("atomic_cont");
+ auto *ExitBB = CGF.createBasicBlock("atomic_exit");
+ CGF.EmitBlock(ContBB);
+ auto *DesiredAddr = CreateTempAlloca();
+ if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
+ requiresMemSetZero(
+ getAtomicAddress()->getType()->getPointerElementType())) {
+ auto *OldVal = CGF.Builder.CreateAlignedLoad(
+ ExpectedAddr, getAtomicAlignment().getQuantity());
+ CGF.Builder.CreateAlignedStore(OldVal, DesiredAddr,
+ getAtomicAlignment().getQuantity());
+ }
+ EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
+ auto *Res =
+ EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, AO, Failure);
+ CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
+ CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
+}
+
+void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
+ bool IsVolatile) {
+ auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
+
+ // Do the atomic load.
+ auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
+  // For non-simple lvalues, perform a compare-and-swap loop.
+ auto *ContBB = CGF.createBasicBlock("atomic_cont");
+ auto *ExitBB = CGF.createBasicBlock("atomic_exit");
+ auto *CurBB = CGF.Builder.GetInsertBlock();
+ CGF.EmitBlock(ContBB);
+ llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
+ /*NumReservedValues=*/2);
+ PHI->addIncoming(OldVal, CurBB);
+ auto *NewAtomicAddr = CreateTempAlloca();
+ auto *NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
+ if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
+ requiresMemSetZero(
+ getAtomicAddress()->getType()->getPointerElementType())) {
+ CGF.Builder.CreateAlignedStore(PHI, NewAtomicIntAddr,
+ getAtomicAlignment().getQuantity());
+ }
+ EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
+ auto *DesiredVal = CGF.Builder.CreateAlignedLoad(
+ NewAtomicIntAddr, getAtomicAlignment().getQuantity());
+  // Try to write the new value using a cmpxchg operation.
+ auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
+ PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
+ CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
+ CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
+}
+
+void AtomicInfo::EmitAtomicUpdate(
+ llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
+ bool IsVolatile) {
+ if (shouldUseLibcall()) {
+ EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
+ } else {
+ EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
+ }
+}
+
+void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
+ bool IsVolatile) {
+ if (shouldUseLibcall()) {
+ EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
+ } else {
+ EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
+ }
+}
+
+void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
+ bool isInit) {
+ bool IsVolatile = lvalue.isVolatileQualified();
+ llvm::AtomicOrdering AO;
+ if (lvalue.getType()->isAtomicType()) {
+ AO = llvm::SequentiallyConsistent;
+ } else {
+ AO = llvm::Release;
+ IsVolatile = true;
+ }
+ return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
+}
+
/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
-void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
+void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
+ llvm::AtomicOrdering AO, bool IsVolatile,
+ bool isInit) {
// If this is an aggregate r-value, it should agree in type except
// maybe for address-space qualification.
assert(!rvalue.isAggregate() ||
@@ -1095,54 +1664,64 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
== dest.getAddress()->getType()->getPointerElementType());
AtomicInfo atomics(*this, dest);
+ LValue LVal = atomics.getAtomicLValue();
// If this is an initialization, just put the value there normally.
- if (isInit) {
- atomics.emitCopyIntoMemory(rvalue, dest);
- return;
- }
+ if (LVal.isSimple()) {
+ if (isInit) {
+ atomics.emitCopyIntoMemory(rvalue);
+ return;
+ }
- // Check whether we should use a library call.
- if (atomics.shouldUseLibcall()) {
- // Produce a source address.
- llvm::Value *srcAddr = atomics.materializeRValue(rvalue);
+ // Check whether we should use a library call.
+ if (atomics.shouldUseLibcall()) {
+ // Produce a source address.
+ llvm::Value *srcAddr = atomics.materializeRValue(rvalue);
- // void __atomic_store(size_t size, void *mem, void *val, int order)
- CallArgList args;
- args.add(RValue::get(atomics.getAtomicSizeValue()),
- getContext().getSizeType());
- args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
- getContext().VoidPtrTy);
- args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
- getContext().VoidPtrTy);
- args.add(RValue::get(llvm::ConstantInt::get(
- IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
- getContext().IntTy);
- emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
+ // void __atomic_store(size_t size, void *mem, void *val, int order)
+ CallArgList args;
+ args.add(RValue::get(atomics.getAtomicSizeValue()),
+ getContext().getSizeType());
+ args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicAddress())),
+ getContext().VoidPtrTy);
+ args.add(RValue::get(EmitCastToVoidPtr(srcAddr)), getContext().VoidPtrTy);
+ args.add(RValue::get(llvm::ConstantInt::get(
+ IntTy, AtomicInfo::translateAtomicOrdering(AO))),
+ getContext().IntTy);
+ emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
+ return;
+ }
+
+ // Okay, we're doing this natively.
+ llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
+
+ // Do the atomic store.
+ llvm::Value *addr =
+ atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
+ intValue = Builder.CreateIntCast(
+ intValue, addr->getType()->getPointerElementType(), /*isSigned=*/false);
+ llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
+
+ // Initializations don't need to be atomic.
+ if (!isInit)
+ store->setAtomic(AO);
+
+ // Other decoration.
+ store->setAlignment(dest.getAlignment().getQuantity());
+ if (IsVolatile)
+ store->setVolatile(true);
+ if (dest.getTBAAInfo())
+ CGM.DecorateInstruction(store, dest.getTBAAInfo());
return;
}
- // Okay, we're doing this natively.
- llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
-
- // Do the atomic store.
- llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
- llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
-
- // Initializations don't need to be atomic.
- if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);
-
- // Other decoration.
- store->setAlignment(dest.getAlignment().getQuantity());
- if (dest.isVolatileQualified())
- store->setVolatile(true);
- if (dest.getTBAAInfo())
- CGM.DecorateInstruction(store, dest.getTBAAInfo());
+ // Emit simple atomic update operation.
+ atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}
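
For non-simple lvalues there is no whole-object store to perform, so the
store decays into a read-modify-write of the containing atomic storage unit;
e.g. (a sketch, again assuming OpenMP atomics as the client):

  struct S { int BF : 7; };

  void writeBF(S *s) {
  #pragma omp atomic write
    s->BF = 5;  // lowered via atomics.EmitAtomicUpdate, not a plain store
  }
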
/// Emit a compare-and-exchange op for atomic type.
///
-std::pair<RValue, RValue> CodeGenFunction::EmitAtomicCompareExchange(
+std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
AggValueSlot Slot) {
@@ -1156,56 +1735,15 @@ std::pair<RValue, RValue> CodeGenFunction::EmitAtomicCompareExchange(
Obj.getAddress()->getType()->getPointerElementType());
AtomicInfo Atomics(*this, Obj);
- if (Failure >= Success)
- // Don't assert on undefined behavior.
- Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
-
- auto Alignment = Atomics.getValueAlignment();
- // Check whether we should use a library call.
- if (Atomics.shouldUseLibcall()) {
- auto *ExpectedAddr = Atomics.materializeRValue(Expected);
- // Produce a source address.
- auto *DesiredAddr = Atomics.materializeRValue(Desired);
- // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
- // void *desired, int success, int failure);
- CallArgList Args;
- Args.add(RValue::get(Atomics.getAtomicSizeValue()),
- getContext().getSizeType());
- Args.add(RValue::get(EmitCastToVoidPtr(Obj.getAddress())),
- getContext().VoidPtrTy);
- Args.add(RValue::get(EmitCastToVoidPtr(ExpectedAddr)),
- getContext().VoidPtrTy);
- Args.add(RValue::get(EmitCastToVoidPtr(DesiredAddr)),
- getContext().VoidPtrTy);
- Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Success)),
- getContext().IntTy);
- Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Failure)),
- getContext().IntTy);
- auto SuccessFailureRVal = emitAtomicLibcall(
- *this, "__atomic_compare_exchange", getContext().BoolTy, Args);
- auto *PreviousVal =
- Builder.CreateAlignedLoad(ExpectedAddr, Alignment.getQuantity());
- return std::make_pair(RValue::get(PreviousVal), SuccessFailureRVal);
- }
-
- // If we've got a scalar value of the right size, try to avoid going
- // through memory.
- auto *ExpectedIntVal = Atomics.convertRValueToInt(Expected);
- auto *DesiredIntVal = Atomics.convertRValueToInt(Desired);
-
- // Do the atomic store.
- auto *Addr = Atomics.emitCastToAtomicIntPointer(Obj.getAddress());
- auto *Inst = Builder.CreateAtomicCmpXchg(Addr, ExpectedIntVal, DesiredIntVal,
- Success, Failure);
- // Other decoration.
- Inst->setVolatile(Obj.isVolatileQualified());
- Inst->setWeak(IsWeak);
+ return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
+ IsWeak);
+}
- // Okay, turn that back into the original value type.
- auto *PreviousVal = Builder.CreateExtractValue(Inst, /*Idxs=*/0);
- auto *SuccessFailureVal = Builder.CreateExtractValue(Inst, /*Idxs=*/1);
- return std::make_pair(Atomics.convertIntToValue(PreviousVal, Slot, Loc),
- RValue::get(SuccessFailureVal));
+void CodeGenFunction::EmitAtomicUpdate(
+ LValue LVal, llvm::AtomicOrdering AO,
+ const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
+ AtomicInfo Atomics(*this, LVal);
+ Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
@@ -1214,13 +1752,13 @@ void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
switch (atomics.getEvaluationKind()) {
case TEK_Scalar: {
llvm::Value *value = EmitScalarExpr(init);
- atomics.emitCopyIntoMemory(RValue::get(value), dest);
+ atomics.emitCopyIntoMemory(RValue::get(value));
return;
}
case TEK_Complex: {
ComplexPairTy value = EmitComplexExpr(init);
- atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
+ atomics.emitCopyIntoMemory(RValue::getComplex(value));
return;
}
@@ -1229,8 +1767,8 @@ void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
// of atomic type.
bool Zeroed = false;
if (!init->getType()->isAtomicType()) {
- Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
- dest = atomics.projectValue(dest);
+ Zeroed = atomics.emitMemSetZeroIfNecessary();
+ dest = atomics.projectValue();
}
// Evaluate the expression directly into the destination.