Diffstat (limited to 'contrib/llvm/tools/clang/lib/CodeGen/CGAtomic.cpp')
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGAtomic.cpp | 328
1 file changed, 214 insertions(+), 114 deletions(-)
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGAtomic.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGAtomic.cpp
index 89bde2c..daac174 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGAtomic.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGAtomic.cpp
@@ -46,23 +46,26 @@ namespace {
ASTContext &C = CGF.getContext();
- uint64_t valueAlignInBits;
- std::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);
+ uint64_t ValueAlignInBits;
+ uint64_t AtomicAlignInBits;
+ TypeInfo ValueTI = C.getTypeInfo(ValueTy);
+ ValueSizeInBits = ValueTI.Width;
+ ValueAlignInBits = ValueTI.Align;
- uint64_t atomicAlignInBits;
- std::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);
+ TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
+ AtomicSizeInBits = AtomicTI.Width;
+ AtomicAlignInBits = AtomicTI.Align;
assert(ValueSizeInBits <= AtomicSizeInBits);
- assert(valueAlignInBits <= atomicAlignInBits);
+ assert(ValueAlignInBits <= AtomicAlignInBits);
- AtomicAlign = C.toCharUnitsFromBits(atomicAlignInBits);
- ValueAlign = C.toCharUnitsFromBits(valueAlignInBits);
+ AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
+ ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
if (lvalue.getAlignment().isZero())
lvalue.setAlignment(AtomicAlign);
- UseLibcall =
- (AtomicSizeInBits > uint64_t(C.toBits(lvalue.getAlignment())) ||
- AtomicSizeInBits > C.getTargetInfo().getMaxAtomicInlineWidth());
+ UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
+ AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
}
QualType getAtomicType() const { return AtomicTy; }
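
For context on the UseLibcall change in the hunk above: the open-coded size/alignment comparison is replaced with a single query to the target. A rough, paraphrased sketch of what such a predicate checks (names are stand-ins, not a verbatim copy of clang's TargetInfo::hasBuiltinAtomic):

#include <cstdint>

// Paraphrased sketch only; the real predicate is TargetInfo::hasBuiltinAtomic
// in clang's lib/Basic. MaxAtomicInlineWidthInBits stands in for the value the
// old code read via getMaxAtomicInlineWidth().
static bool hasBuiltinAtomicSketch(std::uint64_t AtomicSizeInBits,
                                   std::uint64_t AlignmentInBits,
                                   std::uint64_t MaxAtomicInlineWidthInBits) {
  // An inline atomic needs the object to be no wider than the target's
  // lock-free width and at least as aligned as it is wide.
  return AtomicSizeInBits <= AlignmentInBits &&
         AtomicSizeInBits <= MaxAtomicInlineWidthInBits;
}
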
@@ -70,7 +73,7 @@ namespace {
CharUnits getAtomicAlignment() const { return AtomicAlign; }
CharUnits getValueAlignment() const { return ValueAlign; }
uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
- uint64_t getValueSizeInBits() const { return AtomicSizeInBits; }
+ uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
bool shouldUseLibcall() const { return UseLibcall; }
@@ -100,6 +103,12 @@ namespace {
AggValueSlot resultSlot,
SourceLocation loc) const;
+ /// \brief Converts an r-value to an integer value.
+ llvm::Value *convertRValueToInt(RValue RVal) const;
+
+ /// \brief Converts an integer value back to an r-value of the value type.
+ RValue convertIntToValue(llvm::Value *IntVal, AggValueSlot ResultSlot,
+ SourceLocation Loc) const;
+
/// Copy an atomic r-value into atomic-layout memory.
void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;
@@ -461,11 +470,19 @@ EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
- SourceLocation Loc) {
+ SourceLocation Loc, CharUnits SizeInChars) {
if (UseOptimizedLibcall) {
// Load value and pass it to the function directly.
unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
- Val = CGF.EmitLoadOfScalar(Val, false, Align, ValTy, Loc);
+ int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
+ ValTy =
+ CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
+ llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
+ SizeInBits)->getPointerTo();
+ Val = CGF.EmitLoadOfScalar(CGF.Builder.CreateBitCast(Val, IPtrTy), false,
+ Align, CGF.getContext().getPointerType(ValTy),
+ Loc);
+ // Coerce the value into an appropriately sized integer type.
Args.add(RValue::get(Val), ValTy);
} else {
// Non-optimized functions always take a reference.
@@ -576,8 +593,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
break;
}
- if (!E->getType()->isVoidType() && !Dest)
- Dest = CreateMemTemp(E->getType(), ".atomicdst");
+ QualType RValTy = E->getType().getUnqualifiedType();
+
+ auto GetDest = [&] {
+ if (!RValTy->isVoidType() && !Dest) {
+ Dest = CreateMemTemp(RValTy, ".atomicdst");
+ }
+ return Dest;
+ };
// Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
if (UseLibcall) {
@@ -634,7 +657,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
HaveRetTy = true;
Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
Args.add(RValue::get(Order), getContext().IntTy);
Order = OrderFail;
break;
@@ -646,7 +669,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
case AtomicExpr::AO__atomic_exchange:
LibCallName = "__atomic_exchange";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
break;
// void __atomic_store(size_t size, void *mem, void *val, int order)
// void __atomic_store_N(T *mem, T val, int order)
@@ -657,7 +680,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
RetTy = getContext().VoidTy;
HaveRetTy = true;
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
break;
// void __atomic_load(size_t size, void *mem, void *return, int order)
// T __atomic_load_N(T *mem, int order)
@@ -671,35 +694,35 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
case AtomicExpr::AO__atomic_fetch_add:
LibCallName = "__atomic_fetch_add";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
break;
// T __atomic_fetch_and_N(T *mem, T val, int order)
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
LibCallName = "__atomic_fetch_and";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
break;
// T __atomic_fetch_or_N(T *mem, T val, int order)
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
LibCallName = "__atomic_fetch_or";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
break;
// T __atomic_fetch_sub_N(T *mem, T val, int order)
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
LibCallName = "__atomic_fetch_sub";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
break;
// T __atomic_fetch_xor_N(T *mem, T val, int order)
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__atomic_fetch_xor:
LibCallName = "__atomic_fetch_xor";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
break;
default: return EmitUnsupportedRValue(E, "atomic library call");
}
@@ -711,29 +734,36 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
if (!HaveRetTy) {
if (UseOptimizedLibcall) {
// Value is returned directly.
- RetTy = MemTy;
+ // The function returns an appropriately sized integer type.
+ RetTy = getContext().getIntTypeForBitwidth(
+ getContext().toBits(sizeChars), /*Signed=*/false);
} else {
// Value is returned through parameter before the order.
RetTy = getContext().VoidTy;
- Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
- getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(Dest)), getContext().VoidPtrTy);
}
}
// order is always the last parameter
Args.add(RValue::get(Order),
getContext().IntTy);
- const CGFunctionInfo &FuncInfo =
- CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
- FunctionType::ExtInfo(), RequiredArgs::All);
- llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
- llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
- RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
- if (!RetTy->isVoidType())
+ RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
+ // The value is returned directly from the libcall.
+ if (HaveRetTy && !RetTy->isVoidType())
return Res;
- if (E->getType()->isVoidType())
+ // The value is returned via an explicit out param.
+ if (RetTy->isVoidType())
return RValue::get(nullptr);
- return convertTempToRValue(Dest, E->getType(), E->getExprLoc());
+ // The value is returned directly for optimized libcalls, but the caller
+ // expects an out-param.
+ if (UseOptimizedLibcall) {
+ llvm::Value *ResVal = Res.getScalarVal();
+ llvm::StoreInst *StoreDest = Builder.CreateStore(
+ ResVal,
+ Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
+ StoreDest->setAlignment(Align);
+ }
+ return convertTempToRValue(Dest, RValTy, E->getExprLoc());
}
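
The sizeChars parameter threaded through the AddDirectArgument calls above exists so the optimized _N libcall variants (such as __atomic_fetch_add_4) can take the value by value as an integer of the access width. A minimal C++ analogue of that pointer-bitcast-and-load coercion, with hypothetical names:

#include <cstdint>
#include <cstring>

// Hypothetical illustration: reload a 4-byte object as the uint32_t payload
// that an optimized libcall such as __atomic_fetch_add_4 takes by value.
static std::uint32_t coerceToU32(const void *Val) {
  std::uint32_t Bits;
  std::memcpy(&Bits, Val, sizeof Bits); // "bitcast" the pointer, load as integer
  return Bits;
}
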
bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
@@ -743,13 +773,15 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
E->getOp() == AtomicExpr::AO__atomic_load ||
E->getOp() == AtomicExpr::AO__atomic_load_n;
- llvm::Type *IPtrTy =
- llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
- llvm::Value *OrigDest = Dest;
- Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
- if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
- if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
- if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);
+ llvm::Type *ITy =
+ llvm::IntegerType::get(getLLVMContext(), Size * 8);
+ llvm::Value *OrigDest = GetDest();
+ Ptr = Builder.CreateBitCast(
+ Ptr, ITy->getPointerTo(Ptr->getType()->getPointerAddressSpace()));
+ if (Val1) Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
+ if (Val2) Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
+ if (Dest && !E->isCmpXChg())
+ Dest = Builder.CreateBitCast(Dest, ITy->getPointerTo());
if (isa<llvm::ConstantInt>(Order)) {
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
@@ -786,9 +818,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
// enforce that in general.
break;
}
- if (E->getType()->isVoidType())
+ if (RValTy->isVoidType())
return RValue::get(nullptr);
- return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
+ return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
}
// Long case, when Order isn't obviously constant.
@@ -854,9 +886,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
// Cleanup and return
Builder.SetInsertPoint(ContBB);
- if (E->getType()->isVoidType())
+ if (RValTy->isVoidType())
return RValue::get(nullptr);
- return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
+ return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
}
llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
@@ -882,6 +914,45 @@ RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
return CGF.convertTempToRValue(addr, getValueType(), loc);
}
+RValue AtomicInfo::convertIntToValue(llvm::Value *IntVal,
+ AggValueSlot ResultSlot,
+ SourceLocation Loc) const {
+ // Try to avoid going through memory in some easy cases.
+ assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
+ if (getEvaluationKind() == TEK_Scalar && !hasPadding()) {
+ auto *ValTy = CGF.ConvertTypeForMem(ValueTy);
+ if (ValTy->isIntegerTy()) {
+ assert(IntVal->getType() == ValTy && "Different integer types.");
+ return RValue::get(IntVal);
+ } else if (ValTy->isPointerTy())
+ return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
+ else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
+ return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
+ }
+
+ // Create a temporary. This needs to be big enough to hold the
+ // atomic integer.
+ llvm::Value *Temp;
+ bool TempIsVolatile = false;
+ CharUnits TempAlignment;
+ if (getEvaluationKind() == TEK_Aggregate) {
+ assert(!ResultSlot.isIgnored());
+ Temp = ResultSlot.getAddr();
+ TempAlignment = getValueAlignment();
+ TempIsVolatile = ResultSlot.isVolatile();
+ } else {
+ Temp = CGF.CreateMemTemp(getAtomicType(), "atomic-temp");
+ TempAlignment = getAtomicAlignment();
+ }
+
+ // Slam the integer into the temporary.
+ llvm::Value *CastTemp = emitCastToAtomicIntPointer(Temp);
+ CGF.Builder.CreateAlignedStore(IntVal, CastTemp, TempAlignment.getQuantity())
+ ->setVolatile(TempIsVolatile);
+
+ return convertTempToRValue(Temp, ResultSlot, Loc);
+}
+
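
Read outside of CodeGen, convertIntToValue's slow path amounts to storing the atomic integer payload into suitably aligned storage and reloading it as the value type. A self-contained sketch under that reading (helper name and types are ours; byte order matters for narrower T, and this sketch assumes the payload's low bytes hold the value):

#include <cstdint>
#include <cstring>

// Standalone picture of the slow path: store the atomic integer payload into
// a suitably aligned temporary, then read it back as the value type. T must
// be trivially copyable and no wider than the payload.
template <typename T>
T payloadToValue(std::uint64_t Payload) {
  static_assert(sizeof(T) <= sizeof(Payload), "payload too narrow");
  alignas(T) unsigned char Temp[sizeof(T)];
  std::memcpy(Temp, &Payload, sizeof(T)); // slam the integer into the temporary
  T Result;
  std::memcpy(&Result, Temp, sizeof(T));
  return Result;
}
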
/// Emit a load from an l-value of atomic type. Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
@@ -927,50 +998,12 @@ RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
if (src.getTBAAInfo())
CGM.DecorateInstruction(load, src.getTBAAInfo());
- // Okay, turn that back into the original value type.
- QualType valueType = atomics.getValueType();
- llvm::Value *result = load;
-
// If we're ignoring an aggregate return, don't do anything.
if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
return RValue::getAggregate(nullptr, false);
- // The easiest way to do this this is to go through memory, but we
- // try not to in some easy cases.
- if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
- llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
- if (isa<llvm::IntegerType>(resultTy)) {
- assert(result->getType() == resultTy);
- result = EmitFromMemory(result, valueType);
- } else if (isa<llvm::PointerType>(resultTy)) {
- result = Builder.CreateIntToPtr(result, resultTy);
- } else {
- result = Builder.CreateBitCast(result, resultTy);
- }
- return RValue::get(result);
- }
-
- // Create a temporary. This needs to be big enough to hold the
- // atomic integer.
- llvm::Value *temp;
- bool tempIsVolatile = false;
- CharUnits tempAlignment;
- if (atomics.getEvaluationKind() == TEK_Aggregate) {
- assert(!resultSlot.isIgnored());
- temp = resultSlot.getAddr();
- tempAlignment = atomics.getValueAlignment();
- tempIsVolatile = resultSlot.isVolatile();
- } else {
- temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
- tempAlignment = atomics.getAtomicAlignment();
- }
-
- // Slam the integer into the temporary.
- llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
- Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
- ->setVolatile(tempIsVolatile);
-
- return atomics.convertTempToRValue(temp, resultSlot, loc);
+ // Okay, turn that back into the original value type.
+ return atomics.convertIntToValue(load, resultSlot, loc);
}
@@ -1023,6 +1056,32 @@ llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
return temp;
}
+llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
+ // If we've got a scalar value of the right size, try to avoid going
+ // through memory.
+ if (RVal.isScalar() && !hasPadding()) {
+ llvm::Value *Value = RVal.getScalarVal();
+ if (isa<llvm::IntegerType>(Value->getType()))
+ return Value;
+ else {
+ llvm::IntegerType *InputIntTy =
+ llvm::IntegerType::get(CGF.getLLVMContext(), getValueSizeInBits());
+ if (isa<llvm::PointerType>(Value->getType()))
+ return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
+ else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
+ return CGF.Builder.CreateBitCast(Value, InputIntTy);
+ }
+ }
+ // Otherwise, we need to go through memory.
+ // Put the r-value in memory.
+ llvm::Value *Addr = materializeRValue(RVal);
+
+ // Cast the temporary to the atomic int type and pull a value out.
+ Addr = emitCastToAtomicIntPointer(Addr);
+ return CGF.Builder.CreateAlignedLoad(Addr,
+ getAtomicAlignment().getQuantity());
+}
+
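
convertRValueToInt is the inverse: use a direct ptrtoint or bitcast when the scalar already has the right shape, otherwise take the same trip through memory. A sketch of that memory fallback, again with our own names and the same byte-order caveat:

#include <cstdint>
#include <cstring>

// The memory fallback in the other direction: materialize the value in a
// temporary, then reload it as an integer payload.
template <typename T>
std::uint64_t valueToPayload(const T &Value) {
  static_assert(sizeof(T) <= sizeof(std::uint64_t), "value too wide");
  std::uint64_t Payload = 0;
  std::memcpy(&Payload, &Value, sizeof(T)); // cast temp to int pointer, load
  return Payload;
}
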
/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
@@ -1064,34 +1123,7 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
}
// Okay, we're doing this natively.
- llvm::Value *intValue;
-
- // If we've got a scalar value of the right size, try to avoid going
- // through memory.
- if (rvalue.isScalar() && !atomics.hasPadding()) {
- llvm::Value *value = rvalue.getScalarVal();
- if (isa<llvm::IntegerType>(value->getType())) {
- intValue = value;
- } else {
- llvm::IntegerType *inputIntTy =
- llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
- if (isa<llvm::PointerType>(value->getType())) {
- intValue = Builder.CreatePtrToInt(value, inputIntTy);
- } else {
- intValue = Builder.CreateBitCast(value, inputIntTy);
- }
- }
-
- // Otherwise, we need to go through memory.
- } else {
- // Put the r-value in memory.
- llvm::Value *addr = atomics.materializeRValue(rvalue);
-
- // Cast the temporary to the atomic int type and pull a value out.
- addr = atomics.emitCastToAtomicIntPointer(addr);
- intValue = Builder.CreateAlignedLoad(addr,
- atomics.getAtomicAlignment().getQuantity());
- }
+ llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
// Do the atomic store.
llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
@@ -1108,6 +1140,74 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
CGM.DecorateInstruction(store, dest.getTBAAInfo());
}
+/// Emit a compare-and-exchange op for an atomic type.
+///
+std::pair<RValue, RValue> CodeGenFunction::EmitAtomicCompareExchange(
+ LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
+ llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
+ AggValueSlot Slot) {
+ // If this is an aggregate r-value, it should agree in type except
+ // maybe for address-space qualification.
+ assert(!Expected.isAggregate() ||
+ Expected.getAggregateAddr()->getType()->getPointerElementType() ==
+ Obj.getAddress()->getType()->getPointerElementType());
+ assert(!Desired.isAggregate() ||
+ Desired.getAggregateAddr()->getType()->getPointerElementType() ==
+ Obj.getAddress()->getType()->getPointerElementType());
+ AtomicInfo Atomics(*this, Obj);
+
+ if (Failure >= Success)
+ // Don't assert on undefined behavior.
+ Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
+
+ auto Alignment = Atomics.getValueAlignment();
+ // Check whether we should use a library call.
+ if (Atomics.shouldUseLibcall()) {
+ auto *ExpectedAddr = Atomics.materializeRValue(Expected);
+ // Produce a source address.
+ auto *DesiredAddr = Atomics.materializeRValue(Desired);
+ // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
+ // void *desired, int success, int failure);
+ CallArgList Args;
+ Args.add(RValue::get(Atomics.getAtomicSizeValue()),
+ getContext().getSizeType());
+ Args.add(RValue::get(EmitCastToVoidPtr(Obj.getAddress())),
+ getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(ExpectedAddr)),
+ getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(DesiredAddr)),
+ getContext().VoidPtrTy);
+ Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Success)),
+ getContext().IntTy);
+ Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Failure)),
+ getContext().IntTy);
+ auto SuccessFailureRVal = emitAtomicLibcall(
+ *this, "__atomic_compare_exchange", getContext().BoolTy, Args);
+ auto *PreviousVal =
+ Builder.CreateAlignedLoad(ExpectedAddr, Alignment.getQuantity());
+ return std::make_pair(RValue::get(PreviousVal), SuccessFailureRVal);
+ }
+
+ // If we've got a scalar value of the right size, try to avoid going
+ // through memory.
+ auto *ExpectedIntVal = Atomics.convertRValueToInt(Expected);
+ auto *DesiredIntVal = Atomics.convertRValueToInt(Desired);
+
+ // Do the atomic store.
+ auto *Addr = Atomics.emitCastToAtomicIntPointer(Obj.getAddress());
+ auto *Inst = Builder.CreateAtomicCmpXchg(Addr, ExpectedIntVal, DesiredIntVal,
+ Success, Failure);
+ // Other decoration.
+ Inst->setVolatile(Obj.isVolatileQualified());
+ Inst->setWeak(IsWeak);
+
+ // Okay, turn that back into the original value type.
+ auto *PreviousVal = Builder.CreateExtractValue(Inst, /*Idxs=*/0);
+ auto *SuccessFailureVal = Builder.CreateExtractValue(Inst, /*Idxs=*/1);
+ return std::make_pair(Atomics.convertIntToValue(PreviousVal, Slot, Loc),
+ RValue::get(SuccessFailureVal));
+}
+
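
At the source level, both paths EmitAtomicCompareExchange can emit (the inline cmpxchg instruction and the generic __atomic_compare_exchange libcall) implement the usual compare-and-swap contract. A small usage-level illustration with std::atomic:

#include <atomic>
#include <cstdio>

int main() {
  std::atomic<int> Obj{41};
  int Expected = 41;
  // The frontend lowers this either to an inline cmpxchg instruction or,
  // for oversized or under-aligned types, to the generic libcall whose
  // argument list the hunk above assembles.
  bool Ok = Obj.compare_exchange_strong(Expected, 42,
                                        std::memory_order_seq_cst,
                                        std::memory_order_seq_cst);
  std::printf("ok=%d value=%d expected=%d\n", Ok, Obj.load(), Expected);
  return 0;
}
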
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
AtomicInfo atomics(*this, dest);