Diffstat (limited to 'contrib/llvm/tools/clang/lib/CodeGen/CGAtomic.cpp')
-rw-r--r-- | contrib/llvm/tools/clang/lib/CodeGen/CGAtomic.cpp | 303
1 file changed, 224 insertions, 79 deletions
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGAtomic.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGAtomic.cpp
index 0df2a40..89bde2c 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGAtomic.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGAtomic.cpp
@@ -24,16 +24,6 @@
 using namespace clang;
 using namespace CodeGen;
 
-// The ABI values for various atomic memory orderings.
-enum AtomicOrderingKind {
-  AO_ABI_memory_order_relaxed = 0,
-  AO_ABI_memory_order_consume = 1,
-  AO_ABI_memory_order_acquire = 2,
-  AO_ABI_memory_order_release = 3,
-  AO_ABI_memory_order_acq_rel = 4,
-  AO_ABI_memory_order_seq_cst = 5
-};
-
 namespace {
   class AtomicInfo {
     CodeGenFunction &CGF;
@@ -57,10 +47,10 @@
       ASTContext &C = CGF.getContext();
 
       uint64_t valueAlignInBits;
-      llvm::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);
+      std::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);
 
       uint64_t atomicAlignInBits;
-      llvm::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);
+      std::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);
 
       assert(ValueSizeInBits <= AtomicSizeInBits);
       assert(valueAlignInBits <= atomicAlignInBits);
@@ -184,10 +174,139 @@ bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
   return true;
 }
 
-static void
-EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
-             llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
-             uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
+static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
+                              llvm::Value *Dest, llvm::Value *Ptr,
+                              llvm::Value *Val1, llvm::Value *Val2,
+                              uint64_t Size, unsigned Align,
+                              llvm::AtomicOrdering SuccessOrder,
+                              llvm::AtomicOrdering FailureOrder) {
+  // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
+  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
+  Expected->setAlignment(Align);
+  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
+  Desired->setAlignment(Align);
+
+  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
+      Ptr, Expected, Desired, SuccessOrder, FailureOrder);
+  Pair->setVolatile(E->isVolatile());
+  Pair->setWeak(IsWeak);
+
+  // Cmp holds the result of the compare-exchange operation: true on success,
+  // false on failure.
+  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
+  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
+
+  // This basic block is used to hold the store instruction if the operation
+  // failed.
+  llvm::BasicBlock *StoreExpectedBB =
+      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
+
+  // This basic block is the exit point of the operation, we should end up
+  // here regardless of whether or not the operation succeeded.
+  llvm::BasicBlock *ContinueBB =
+      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
+
+  // Update Expected if Expected isn't equal to Old, otherwise branch to the
+  // exit point.
+  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
+
+  CGF.Builder.SetInsertPoint(StoreExpectedBB);
+  // Update the memory at Expected with Old's value.
+  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
+  StoreExpected->setAlignment(Align);
+  // Finally, branch to the exit point.
+  CGF.Builder.CreateBr(ContinueBB);
+
+  CGF.Builder.SetInsertPoint(ContinueBB);
+  // Update the memory at Dest with Cmp's value.
+  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
+  return;
+}
+
+/// Given an ordering required on success, emit all possible cmpxchg
+/// instructions to cope with the provided (but possibly only dynamically known)
+/// FailureOrder.
+static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
+                                        bool IsWeak, llvm::Value *Dest,
+                                        llvm::Value *Ptr, llvm::Value *Val1,
+                                        llvm::Value *Val2,
+                                        llvm::Value *FailureOrderVal,
+                                        uint64_t Size, unsigned Align,
+                                        llvm::AtomicOrdering SuccessOrder) {
+  llvm::AtomicOrdering FailureOrder;
+  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
+    switch (FO->getSExtValue()) {
+    default:
+      FailureOrder = llvm::Monotonic;
+      break;
+    case AtomicExpr::AO_ABI_memory_order_consume:
+    case AtomicExpr::AO_ABI_memory_order_acquire:
+      FailureOrder = llvm::Acquire;
+      break;
+    case AtomicExpr::AO_ABI_memory_order_seq_cst:
+      FailureOrder = llvm::SequentiallyConsistent;
+      break;
+    }
+    if (FailureOrder >= SuccessOrder) {
+      // Don't assert on undefined behaviour.
+      FailureOrder =
+          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
+    }
+    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align,
+                      SuccessOrder, FailureOrder);
+    return;
+  }
+
+  // Create all the relevant BB's
+  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
+                   *SeqCstBB = nullptr;
+  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
+  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
+    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
+  if (SuccessOrder == llvm::SequentiallyConsistent)
+    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
+
+  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
+
+  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
+
+  // Emit all the different atomics
+
+  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
+  // doesn't matter unless someone is crazy enough to use something that
+  // doesn't fold to a constant for the ordering.
+  CGF.Builder.SetInsertPoint(MonotonicBB);
+  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
+                    Size, Align, SuccessOrder, llvm::Monotonic);
+  CGF.Builder.CreateBr(ContBB);
+
+  if (AcquireBB) {
+    CGF.Builder.SetInsertPoint(AcquireBB);
+    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
+                      Size, Align, SuccessOrder, llvm::Acquire);
+    CGF.Builder.CreateBr(ContBB);
+    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
+                AcquireBB);
+    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
+                AcquireBB);
+  }
+  if (SeqCstBB) {
+    CGF.Builder.SetInsertPoint(SeqCstBB);
+    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
+                      Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
+    CGF.Builder.CreateBr(ContBB);
+    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
+                SeqCstBB);
+  }
+
+  CGF.Builder.SetInsertPoint(ContBB);
+}
+
+static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
+                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
+                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
+                         uint64_t Size, unsigned Align,
+                         llvm::AtomicOrdering Order) {
   llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
   llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
@@ -196,25 +315,43 @@ EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
     llvm_unreachable("Already handled!");
 
   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
+                                FailureOrder, Size, Align, Order);
+    return;
   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
+                                FailureOrder, Size, Align, Order);
+    return;
   case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
-    // Note that cmpxchg only supports specifying one ordering and
-    // doesn't support weak cmpxchg, at least at the moment.
-    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
-    LoadVal1->setAlignment(Align);
-    llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
-    LoadVal2->setAlignment(Align);
-    llvm::AtomicCmpXchgInst *CXI =
-        CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
-    CXI->setVolatile(E->isVolatile());
-    llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
-    StoreVal1->setAlignment(Align);
-    llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
-    CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
+    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
+      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
+                                  Val1, Val2, FailureOrder, Size, Align, Order);
+    } else {
+      // Create all the relevant BB's
+      llvm::BasicBlock *StrongBB =
+          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
+      llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
+      llvm::BasicBlock *ContBB =
+          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
+
+      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
+      SI->addCase(CGF.Builder.getInt1(false), StrongBB);
+
+      CGF.Builder.SetInsertPoint(StrongBB);
+      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
+                                  FailureOrder, Size, Align, Order);
+      CGF.Builder.CreateBr(ContBB);
+
+      CGF.Builder.SetInsertPoint(WeakBB);
+      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
+                                  FailureOrder, Size, Align, Order);
+      CGF.Builder.CreateBr(ContBB);
+
+      CGF.Builder.SetInsertPoint(ContBB);
+    }
     return;
   }
-
   case AtomicExpr::AO__c11_atomic_load:
   case AtomicExpr::AO__atomic_load_n:
   case AtomicExpr::AO__atomic_load: {
@@ -351,17 +488,18 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
   bool UseLibcall = (Size != Align ||
                      getContext().toBits(sizeChars) > MaxInlineWidthInBits);
 
-  llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
-  Ptr = EmitScalarExpr(E->getPtr());
+  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
+              *Val2 = nullptr;
+  llvm::Value *Ptr = EmitScalarExpr(E->getPtr());
 
   if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
     assert(!Dest && "Init does not return a value");
     LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
     EmitAtomicInit(E->getVal1(), lvalue);
-    return RValue::get(0);
+    return RValue::get(nullptr);
   }
 
-  Order = EmitScalarExpr(E->getOrder());
+  llvm::Value *Order = EmitScalarExpr(E->getOrder());
 
   switch (E->getOp()) {
   case AtomicExpr::AO__c11_atomic_init:
@@ -394,9 +532,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
     else
       Val2 = EmitValToTemp(*this, E->getVal2());
     OrderFail = EmitScalarExpr(E->getOrderFail());
-    // Evaluate and discard the 'weak' argument.
     if (E->getNumSubExprs() == 6)
-      EmitScalarExpr(E->getWeak());
+      IsWeak = EmitScalarExpr(E->getWeak());
     break;
 
   case AtomicExpr::AO__c11_atomic_fetch_add:
@@ -476,6 +613,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
     Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);
 
     std::string LibCallName;
+    QualType LoweredMemTy =
+        MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
@@ -531,7 +670,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
-      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
@@ -552,7 +691,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
-      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
@@ -593,7 +732,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
    if (!RetTy->isVoidType())
      return Res;
    if (E->getType()->isVoidType())
-      return RValue::get(0);
+      return RValue::get(nullptr);
    return convertTempToRValue(Dest, E->getType(), E->getExprLoc());
  }
@@ -615,32 +754,32 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
-    case AO_ABI_memory_order_relaxed:
-      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
-                   llvm::Monotonic);
+    case AtomicExpr::AO_ABI_memory_order_relaxed:
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+                   Size, Align, llvm::Monotonic);
      break;
-    case AO_ABI_memory_order_consume:
-    case AO_ABI_memory_order_acquire:
+    case AtomicExpr::AO_ABI_memory_order_consume:
+    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
-      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
-                   llvm::Acquire);
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+                   Size, Align, llvm::Acquire);
      break;
-    case AO_ABI_memory_order_release:
+    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
-      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
-                   llvm::Release);
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+                   Size, Align, llvm::Release);
      break;
-    case AO_ABI_memory_order_acq_rel:
+    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
-      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
-                   llvm::AcquireRelease);
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+                   Size, Align, llvm::AcquireRelease);
      break;
-    case AO_ABI_memory_order_seq_cst:
-      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
-                   llvm::SequentiallyConsistent);
+    case AtomicExpr::AO_ABI_memory_order_seq_cst:
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+                   Size, Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
@@ -648,15 +787,16 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
      break;
    }
    if (E->getType()->isVoidType())
-      return RValue::get(0);
+      return RValue::get(nullptr);
    return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
  }
 
  // Long case, when Order isn't obviously constant.
 
  // Create all the relevant BB's
-  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
-                   *AcqRelBB = 0, *SeqCstBB = 0;
+  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
+                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
+                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
@@ -676,41 +816,46 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
 
  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
-  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
-               llvm::Monotonic);
+  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+               Size, Align, llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
-    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
-                 llvm::Acquire);
+    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+                 Size, Align, llvm::Acquire);
    Builder.CreateBr(ContBB);
-    SI->addCase(Builder.getInt32(1), AcquireBB);
-    SI->addCase(Builder.getInt32(2), AcquireBB);
+    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
+                AcquireBB);
+    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
+                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
-    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
-                 llvm::Release);
+    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+                 Size, Align, llvm::Release);
    Builder.CreateBr(ContBB);
-    SI->addCase(Builder.getInt32(3), ReleaseBB);
+    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
+                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
-    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
-                 llvm::AcquireRelease);
+    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+                 Size, Align, llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
-    SI->addCase(Builder.getInt32(4), AcqRelBB);
+    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
+                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
-  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
-               llvm::SequentiallyConsistent);
+  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
+               Size, Align, llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
-  SI->addCase(Builder.getInt32(5), SeqCstBB);
+  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
+              SeqCstBB);
 
  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
-    return RValue::get(0);
+    return RValue::get(nullptr);
  return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
 }
@@ -761,8 +906,8 @@ RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
              getContext().VoidPtrTy);
     args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
              getContext().VoidPtrTy);
-    args.add(RValue::get(llvm::ConstantInt::get(IntTy,
-                                                AO_ABI_memory_order_seq_cst)),
+    args.add(RValue::get(llvm::ConstantInt::get(
+                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
              getContext().IntTy);
     emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);
@@ -788,7 +933,7 @@ RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
 
  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
-    return RValue::getAggregate(0, false);
+    return RValue::getAggregate(nullptr, false);
 
  // The easiest way to do this this is to go through memory, but we
  // try not to in some easy cases.
@@ -911,8 +1056,8 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
              getContext().VoidPtrTy);
     args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
              getContext().VoidPtrTy);
-    args.add(RValue::get(llvm::ConstantInt::get(IntTy,
-                                                AO_ABI_memory_order_seq_cst)),
+    args.add(RValue::get(llvm::ConstantInt::get(
+                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
     emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
     return;
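
For illustration (not part of the commit): the bulk of this change makes Clang honour the separate success/failure orderings and the weak flag of C11/C++11 compare-exchange operations instead of discarding them. The source-level forms it lowers look roughly like the following sketch; the function names here are illustrative only:

    #include <atomic>

    // Constant orderings: the failure ordering folds to a ConstantInt, so
    // emitAtomicCmpXchgFailureSet takes its fast path and emits a single
    // cmpxchg instruction carrying both orderings.
    bool try_update(std::atomic<int> &a, int &expected, int desired) {
      return a.compare_exchange_weak(expected, desired,
                                     std::memory_order_acq_rel,   // success
                                     std::memory_order_acquire);  // failure
    }

    // Runtime ordering: the failure ordering is not a compile-time constant,
    // so codegen emits a switch over the ordering value (the monotonic_fail/
    // acquire_fail/seqcst_fail blocks above), one cmpxchg per case.
    bool try_update_dyn(std::atomic<int> &a, int &expected, int desired,
                        std::memory_order failure) {
      return a.compare_exchange_strong(expected, desired,
                                       std::memory_order_seq_cst, failure);
    }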
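
The __atomic_compare_exchange_n builtin additionally takes a boolean "weak" argument, which the old code evaluated and discarded; it now reaches EmitAtomicOp as IsWeak. When that flag is not a constant, the new code emits both variants and switches on the flag (the cmpxchg.strong/cmxchg.weak blocks). A minimal sketch of a call that exercises the non-constant path:

    // 'weak' is a runtime value, so neither the strong nor the weak form can
    // be chosen statically; codegen branches between the two cmpxchg forms.
    bool cas(int *p, int *expected, int desired, bool weak) {
      return __atomic_compare_exchange_n(p, expected, desired, weak,
                                         __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
    }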
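
When a constant failure ordering is invalid (C11 forbids a failure ordering stronger than the success ordering, or one with release semantics), the code clamps it via llvm::AtomicCmpXchgInst::getStrongestFailureOrdering rather than asserting. A sketch of that mapping, as an illustrative reimplementation of those rules rather than the LLVM source:

    enum Ordering { Monotonic, Acquire, Release, AcquireRelease, SeqCst };

    // Strongest ordering that is legal on the failure path: drop any release
    // semantics, and never exceed the success ordering.
    Ordering strongestFailureOrdering(Ordering Success) {
      switch (Success) {
      case Release:
      case Monotonic:
        return Monotonic;
      case AcquireRelease:
      case Acquire:
        return Acquire;
      default:
        return SeqCst;
      }
    }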
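
Finally, the new LoweredMemTy only matters on the libcall fallback path for fetch_add/fetch_sub: the T __atomic_fetch_add_N(T *mem, T val, int order) libcalls traffic in integers, so when the atomic's element type is a pointer the value argument is now passed with pointer-width integer type (getIntPtrType) rather than as a pointer. A hedged example of source that can reach this path; note the libcall is only used when the operation cannot be inlined (e.g. unsupported width or insufficient alignment on the target):

    #include <atomic>

    // fetch_add on an atomic pointer; on the libcall path the addend is
    // passed to __atomic_fetch_add_N as a pointer-width integer.
    long *advance(std::atomic<long *> &p) {
      return p.fetch_add(1, std::memory_order_relaxed);
    }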