Diffstat (limited to 'lib/CodeGen/CGDecl.cpp')
-rw-r--r-- | lib/CodeGen/CGDecl.cpp | 339
1 file changed, 221 insertions, 118 deletions
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 8870587..5375c5e 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -11,10 +11,10 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "CGDebugInfo.h"
 #include "CodeGenFunction.h"
-#include "CodeGenModule.h"
+#include "CGDebugInfo.h"
 #include "CGOpenCLRuntime.h"
+#include "CodeGenModule.h"
 #include "clang/AST/ASTContext.h"
 #include "clang/AST/CharUnits.h"
 #include "clang/AST/Decl.h"
@@ -22,10 +22,10 @@
 #include "clang/Basic/SourceManager.h"
 #include "clang/Basic/TargetInfo.h"
 #include "clang/Frontend/CodeGenOptions.h"
-#include "llvm/GlobalVariable.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/DataLayout.h"
-#include "llvm/Type.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Type.h"
 
 using namespace clang;
 using namespace CodeGen;
@@ -83,6 +83,8 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
   case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
   case Decl::Label:        // __label__ x;
   case Decl::Import:
+  case Decl::OMPThreadPrivate:
+  case Decl::Empty:
     // None of these decls require codegen support.
     return;
@@ -386,7 +388,9 @@ namespace {
       }
 
       CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
-                                /*ForVirtualBase=*/false, Loc);
+                                /*ForVirtualBase=*/false,
+                                /*Delegating=*/false,
+                                Loc);
       if (NRVO) CGF.EmitBlock(SkipDtorBB);
     }
@@ -448,6 +452,22 @@ namespace {
       CGF.EmitCall(FnInfo, CleanupFn, ReturnValueSlot(), Args);
     }
   };
+
+  /// A cleanup to call @llvm.lifetime.end.
+  class CallLifetimeEnd : public EHScopeStack::Cleanup {
+    llvm::Value *Addr;
+    llvm::Value *Size;
+  public:
+    CallLifetimeEnd(llvm::Value *addr, llvm::Value *size)
+      : Addr(addr), Size(size) {}
+
+    void Emit(CodeGenFunction &CGF, Flags flags) {
+      llvm::Value *castAddr = CGF.Builder.CreateBitCast(Addr, CGF.Int8PtrTy);
+      CGF.Builder.CreateCall2(CGF.CGM.getLLVMLifetimeEndFn(),
+                              Size, castAddr)
+        ->setDoesNotThrow();
+    }
+  };
 }
 
 /// EmitAutoVarWithLifetime - Does the setup required for an automatic
@@ -624,7 +644,7 @@ void CodeGenFunction::EmitScalarInit(const Expr *init,
   if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
     llvm::Value *oldValue = EmitLoadOfScalar(lvalue);
     EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
-    EmitARCRelease(oldValue, /*precise*/ false);
+    EmitARCRelease(oldValue, ARCImpreciseLifetime);
     return;
   }
@@ -752,7 +772,6 @@ static bool shouldUseMemSetPlusStoresToInitialize(llvm::Constant *Init,
   // If a global is all zeros, always use a memset.
   if (isa<llvm::ConstantAggregateZero>(Init)) return true;
 
-
   // If a non-zero global is <= 32 bytes, always use a memcpy.  If it is large,
   // do it if it will require 6 or fewer scalar stores.
   // TODO: Should budget depends on the size?  Avoiding a large global warrants
@@ -764,6 +783,23 @@ static bool shouldUseMemSetPlusStoresToInitialize(llvm::Constant *Init,
          canEmitInitWithFewStoresAfterMemset(Init, StoreBudget);
 }
 
+/// Should we use the LLVM lifetime intrinsics for the given local variable?
+static bool shouldUseLifetimeMarkers(CodeGenFunction &CGF, const VarDecl &D,
+                                     unsigned Size) {
+  // Always emit lifetime markers in -fsanitize=use-after-scope mode.
+  if (CGF.getLangOpts().Sanitize.UseAfterScope)
+    return true;
+  // For now, only in optimized builds.
+  if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0)
+    return false;
+
+  // Limit the size of marked objects to 32 bytes. We don't want to increase
+  // compile time by marking tiny objects.
+  unsigned SizeThreshold = 32;
+
+  return Size > SizeThreshold;
+}
+
 /// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
 /// variable declaration with auto, register, or no storage class specifier.
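Note: to make the threshold concrete, here is a hypothetical translation unit (not part of this patch; 'consume' is an assumed extern) showing which locals the heuristic above would mark once optimization is enabled:

    // demo.cpp - hypothetical example of the 32-byte threshold at -O1 and up.
    extern void consume(char *p, int n);

    void demo() {
      char big[128]; // 128 > SizeThreshold: wrapped in @llvm.lifetime.start/end,
                     // so later passes may reuse or elide the stack slot
      int small = 0; // 4 <= SizeThreshold: skipped to keep compile time down
      consume(big, small);
    }                // the lifetime.end for 'big' comes from a cleanup pushed
                     // in EmitAutoVarCleanups (see further down in this diff)

Under -fsanitize=use-after-scope both locals would be marked regardless of size, which is what the early return in shouldUseLifetimeMarkers provides.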
@@ -794,85 +830,91 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
 
   llvm::Value *DeclPtr;
   if (Ty->isConstantSizeType()) {
-    if (!Target.useGlobalsForAutomaticVariables()) {
-      bool NRVO = getLangOpts().ElideConstructors &&
-        D.isNRVOVariable();
-
-      // If this value is a POD array or struct with a statically
-      // determinable constant initializer, there are optimizations we can do.
-      //
-      // TODO: We should constant-evaluate the initializer of any variable,
-      // as long as it is initialized by a constant expression. Currently,
-      // isConstantInitializer produces wrong answers for structs with
-      // reference or bitfield members, and a few other cases, and checking
-      // for POD-ness protects us from some of these.
-      if (D.getInit() &&
-          (Ty->isArrayType() || Ty->isRecordType()) &&
-          (Ty.isPODType(getContext()) ||
-           getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
-          D.getInit()->isConstantInitializer(getContext(), false)) {
-
-        // If the variable's a const type, and it's neither an NRVO
-        // candidate nor a __block variable and has no mutable members,
-        // emit it as a global instead.
-        if (CGM.getCodeGenOpts().MergeAllConstants && !NRVO && !isByRef &&
-            CGM.isTypeConstant(Ty, true)) {
-          EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
-
-          emission.Address = 0; // signal this condition to later callbacks
-          assert(emission.wasEmittedAsGlobal());
-          return emission;
-        }
-
-        // Otherwise, tell the initialization code that we're in this case.
-        emission.IsConstantAggregate = true;
+    bool NRVO = getLangOpts().ElideConstructors &&
+      D.isNRVOVariable();
+
+    // If this value is a POD array or struct with a statically
+    // determinable constant initializer, there are optimizations we can do.
+    //
+    // TODO: We should constant-evaluate the initializer of any variable,
+    // as long as it is initialized by a constant expression. Currently,
+    // isConstantInitializer produces wrong answers for structs with
+    // reference or bitfield members, and a few other cases, and checking
+    // for POD-ness protects us from some of these.
+    if (D.getInit() &&
+        (Ty->isArrayType() || Ty->isRecordType()) &&
+        (Ty.isPODType(getContext()) ||
+         getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
+        D.getInit()->isConstantInitializer(getContext(), false)) {
+
+      // If the variable's a const type, and it's neither an NRVO
+      // candidate nor a __block variable and has no mutable members,
+      // emit it as a global instead.
+      if (CGM.getCodeGenOpts().MergeAllConstants && !NRVO && !isByRef &&
+          CGM.isTypeConstant(Ty, true)) {
+        EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
+
+        emission.Address = 0; // signal this condition to later callbacks
+        assert(emission.wasEmittedAsGlobal());
+        return emission;
       }
-      // A normal fixed sized variable becomes an alloca in the entry block,
-      // unless it's an NRVO variable.
-      llvm::Type *LTy = ConvertTypeForMem(Ty);
+      // Otherwise, tell the initialization code that we're in this case.
+      emission.IsConstantAggregate = true;
+    }
-      if (NRVO) {
-        // The named return value optimization: allocate this variable in the
-        // return slot, so that we can elide the copy when returning this
-        // variable (C++0x [class.copy]p34).
-        DeclPtr = ReturnValue;
-
-        if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
-          if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
-            // Create a flag that is used to indicate when the NRVO was applied
-            // to this variable. Set it to zero to indicate that NRVO was not
-            // applied.
-            llvm::Value *Zero = Builder.getFalse();
-            llvm::Value *NRVOFlag = CreateTempAlloca(Zero->getType(), "nrvo");
-            EnsureInsertPoint();
-            Builder.CreateStore(Zero, NRVOFlag);
-
-            // Record the NRVO flag for this variable.
-            NRVOFlags[&D] = NRVOFlag;
-            emission.NRVOFlag = NRVOFlag;
-          }
+    // A normal fixed sized variable becomes an alloca in the entry block,
+    // unless it's an NRVO variable.
+    llvm::Type *LTy = ConvertTypeForMem(Ty);
+
+    if (NRVO) {
+      // The named return value optimization: allocate this variable in the
+      // return slot, so that we can elide the copy when returning this
+      // variable (C++0x [class.copy]p34).
+      DeclPtr = ReturnValue;
+
+      if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
+        if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
+          // Create a flag that is used to indicate when the NRVO was applied
+          // to this variable. Set it to zero to indicate that NRVO was not
+          // applied.
+          llvm::Value *Zero = Builder.getFalse();
+          llvm::Value *NRVOFlag = CreateTempAlloca(Zero->getType(), "nrvo");
+          EnsureInsertPoint();
+          Builder.CreateStore(Zero, NRVOFlag);
+
+          // Record the NRVO flag for this variable.
+          NRVOFlags[&D] = NRVOFlag;
+          emission.NRVOFlag = NRVOFlag;
         }
-    } else {
-      if (isByRef)
-        LTy = BuildByRefType(&D);
-
-      llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
-      Alloc->setName(D.getName());
-
-      CharUnits allocaAlignment = alignment;
-      if (isByRef)
-        allocaAlignment = std::max(allocaAlignment,
-          getContext().toCharUnitsFromBits(Target.getPointerAlign(0)));
-      Alloc->setAlignment(allocaAlignment.getQuantity());
-      DeclPtr = Alloc;
       }
     } else {
-      // Targets that don't support recursion emit locals as globals.
-      const char *Class =
-        D.getStorageClass() == SC_Register ? ".reg." : ".auto.";
-      DeclPtr = CreateStaticVarDecl(D, Class,
-                                    llvm::GlobalValue::InternalLinkage);
+      if (isByRef)
+        LTy = BuildByRefType(&D);
+
+      llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
+      Alloc->setName(D.getName());
+
+      CharUnits allocaAlignment = alignment;
+      if (isByRef)
+        allocaAlignment = std::max(allocaAlignment,
+          getContext().toCharUnitsFromBits(Target.getPointerAlign(0)));
+      Alloc->setAlignment(allocaAlignment.getQuantity());
+      DeclPtr = Alloc;
+
+      // Emit a lifetime intrinsic if meaningful. There's no point
+      // in doing this if we don't have a valid insertion point (?).
+      uint64_t size = CGM.getDataLayout().getTypeAllocSize(LTy);
+      if (HaveInsertPoint() && shouldUseLifetimeMarkers(*this, D, size)) {
+        llvm::Value *sizeV = llvm::ConstantInt::get(Int64Ty, size);
+
+        emission.SizeForLifetimeMarkers = sizeV;
+        llvm::Value *castAddr = Builder.CreateBitCast(Alloc, Int8PtrTy);
+        Builder.CreateCall2(CGM.getLLVMLifetimeStartFn(), sizeV, castAddr)
+          ->setDoesNotThrow();
+      } else {
+        assert(!emission.useLifetimeMarkers());
+      }
     }
   } else {
     EnsureInsertPoint();
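Note: the "nrvo" flag threaded through this hunk exists because only some exits from a function actually return the NRVO variable. A hypothetical source-level example of the two paths the flag distinguishes:

    // nrvo.cpp - hypothetical example, not from this patch.
    struct S { S() {} ~S() {} };

    S make(bool ok) {
      S s;          // NRVO variable: constructed directly in the return slot
      if (ok)
        return s;   // copy elided; codegen stores 'true' to the nrvo flag
      throw 0;      // unwind path: the flag is still false, so the cleanup
    }               // runs ~S() on the return slot instead of branching to
                    // SkipDtorBB as it does when the flag is set

The flag-guarded destructor is the SkipDtorBB logic visible in the DestroyNRVOVariable hunk near the top of this diff.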
@@ -917,11 +959,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
     if (CGM.getCodeGenOpts().getDebugInfo()
           >= CodeGenOptions::LimitedDebugInfo) {
       DI->setLocation(D.getLocation());
-      if (Target.useGlobalsForAutomaticVariables()) {
-        DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr),
-                               &D);
-      } else
-        DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
+      DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
     }
   }
@@ -1112,21 +1150,33 @@ void CodeGenFunction::EmitExprAsInit(const Expr *init,
     if (capturedByInit)
       drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
     EmitStoreThroughLValue(rvalue, lvalue, true);
-  } else if (!hasAggregateLLVMType(type)) {
+    return;
+  }
+  switch (getEvaluationKind(type)) {
+  case TEK_Scalar:
     EmitScalarInit(init, D, lvalue, capturedByInit);
-  } else if (type->isAnyComplexType()) {
+    return;
+  case TEK_Complex: {
     ComplexPairTy complex = EmitComplexExpr(init);
     if (capturedByInit)
       drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
-    StoreComplexToAddr(complex, lvalue.getAddress(), lvalue.isVolatile());
-  } else {
-    // TODO: how can we delay here if D is captured by its initializer?
-    EmitAggExpr(init, AggValueSlot::forLValue(lvalue,
+    EmitStoreOfComplex(complex, lvalue, /*init*/ true);
+    return;
+  }
+  case TEK_Aggregate:
+    if (type->isAtomicType()) {
+      EmitAtomicInit(const_cast<Expr*>(init), lvalue);
+    } else {
+      // TODO: how can we delay here if D is captured by its initializer?
+      EmitAggExpr(init, AggValueSlot::forLValue(lvalue,
                                               AggValueSlot::IsDestructed,
                                               AggValueSlot::DoesNotNeedGCBarriers,
                                               AggValueSlot::IsNotAliased));
+    }
     MaybeEmitStdInitializerListCleanup(lvalue.getAddress(), init);
+    return;
   }
+  llvm_unreachable("bad evaluation kind");
 }
 
 /// Enter a destroy cleanup for the given local variable.
@@ -1199,6 +1249,14 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
 
   const VarDecl &D = *emission.Variable;
 
+  // Make sure we call @llvm.lifetime.end.  This needs to happen
+  // *last*, so the cleanup needs to be pushed *first*.
+  if (emission.useLifetimeMarkers()) {
+    EHStack.pushCleanup<CallLifetimeEnd>(NormalCleanup,
+                                         emission.getAllocatedAddress(),
+                                         emission.getSizeForLifetimeMarkers());
+  }
+
   // Check the type for a cleanup.
   if (QualType::DestructionKind dtorKind = D.getType().isDestructedType())
     emitAutoVarTypeCleanup(emission, dtorKind);
@@ -1240,7 +1298,18 @@ CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
   llvm_unreachable("Unknown DestructionKind");
 }
 
-/// pushDestroy - Push the standard destructor for the given type.
+/// pushEHDestroy - Push the standard destructor for the given type as
+/// an EH-only cleanup.
+void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
+                                    llvm::Value *addr, QualType type) {
+  assert(dtorKind && "cannot push destructor for trivial type");
+  assert(needsEHCleanup(dtorKind));
+
+  pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
+}
+
+/// pushDestroy - Push the standard destructor for the given type as
+/// at least a normal cleanup.
 void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
                                   llvm::Value *addr, QualType type) {
   assert(dtorKind && "cannot push destructor for trivial type");
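Note: the "pushed *first*, runs *last*" comment in EmitAutoVarCleanups relies on the LIFO discipline of the cleanup stack. A minimal stand-alone model of that ordering (hypothetical code, not CodeGen's actual EHScopeStack):

    // cleanup_order.cpp - hypothetical model of LIFO cleanup dispatch.
    #include <cstdio>
    #include <functional>
    #include <vector>

    struct CleanupStack {
      std::vector<std::function<void()>> Cleanups;
      void push(std::function<void()> F) { Cleanups.push_back(std::move(F)); }
      void runAll() {                      // pop in reverse order of pushing
        while (!Cleanups.empty()) {
          Cleanups.back()();
          Cleanups.pop_back();
        }
      }
    };

    int main() {
      CleanupStack Stack;
      Stack.push([] { std::puts("@llvm.lifetime.end"); }); // pushed first
      Stack.push([] { std::puts("~S()"); });               // pushed later
      Stack.runAll(); // prints "~S()" then "@llvm.lifetime.end"
      return 0;
    }

Running the destructor before lifetime.end guarantees the destructor never touches memory the markers have already declared dead.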
@@ -1434,10 +1503,6 @@ namespace {
 ///
 /// \param elementType - the immediate element type of the array;
 ///   possibly still an array type
-/// \param array - a value of type elementType*
-/// \param destructionKind - the kind of destruction required
-/// \param initializedElementCount - a value of type size_t* holding
-///   the number of successfully-constructed elements
 void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
                                                        llvm::Value *arrayEndPointer,
                                                        QualType elementType,
@@ -1453,10 +1518,6 @@ void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
 ///
 /// \param elementType - the immediate element type of the array;
 ///   possibly still an array type
-/// \param array - a value of type elementType*
-/// \param destructionKind - the kind of destruction required
-/// \param initializedElementCount - a value of type size_t* holding
-///   the number of successfully-constructed elements
 void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
                                                      llvm::Value *arrayEnd,
                                                      QualType elementType,
@@ -1466,18 +1527,37 @@ void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
                                                      elementType, destroyer);
 }
 
+/// Lazily declare the @llvm.lifetime.start intrinsic.
+llvm::Constant *CodeGenModule::getLLVMLifetimeStartFn() {
+  if (LifetimeStartFn) return LifetimeStartFn;
+  LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
+                                              llvm::Intrinsic::lifetime_start);
+  return LifetimeStartFn;
+}
+
+/// Lazily declare the @llvm.lifetime.end intrinsic.
+llvm::Constant *CodeGenModule::getLLVMLifetimeEndFn() {
+  if (LifetimeEndFn) return LifetimeEndFn;
+  LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
+                                                llvm::Intrinsic::lifetime_end);
+  return LifetimeEndFn;
+}
+
 namespace {
   /// A cleanup to perform a release of an object at the end of a
   /// function.  This is used to balance out the incoming +1 of a
   /// ns_consumed argument when we can't reasonably do that just by
   /// not doing the initial retain for a __block argument.
   struct ConsumeARCParameter : EHScopeStack::Cleanup {
-    ConsumeARCParameter(llvm::Value *param) : Param(param) {}
+    ConsumeARCParameter(llvm::Value *param,
+                        ARCPreciseLifetime_t precise)
+      : Param(param), Precise(precise) {}
 
     llvm::Value *Param;
+    ARCPreciseLifetime_t Precise;
 
     void Emit(CodeGenFunction &CGF, Flags flags) {
-      CGF.EmitARCRelease(Param, /*precise*/ false);
+      CGF.EmitARCRelease(Param, Precise);
     }
   };
 }
@@ -1492,17 +1572,29 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
 
   Arg->setName(D.getName());
 
+  QualType Ty = D.getType();
+
   // Use better IR generation for certain implicit parameters.
   if (isa<ImplicitParamDecl>(D)) {
     // The only implicit argument a block has is its literal.
     if (BlockInfo) {
       LocalDeclMap[&D] = Arg;
+      llvm::Value *LocalAddr = 0;
+      if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+        // Allocate a stack slot to let the debug info survive the RA.
+        llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty),
+                                                   D.getName() + ".addr");
+        Alloc->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+        LValue lv = MakeAddrLValue(Alloc, Ty, getContext().getDeclAlign(&D));
+        EmitStoreOfScalar(Arg, lv, /* isInitialization */ true);
+        LocalAddr = Builder.CreateLoad(Alloc);
+      }
       if (CGDebugInfo *DI = getDebugInfo()) {
         if (CGM.getCodeGenOpts().getDebugInfo()
               >= CodeGenOptions::LimitedDebugInfo) {
           DI->setLocation(D.getLocation());
-          DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, Arg, Builder);
+          DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, Arg, LocalAddr, Builder);
         }
       }
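Note: getLLVMLifetimeStartFn and getLLVMLifetimeEndFn above use the usual memoize-a-declaration idiom. A stand-alone sketch of the same pattern (hypothetical helper, assuming the post-IR-move LLVM headers this patch switches to):

    // lazy_intrinsic.cpp - hypothetical sketch; links against LLVM.
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    // Intrinsic::getDeclaration is itself idempotent; caching the result in
    // a slot (CodeGenModule's LifetimeStartFn/LifetimeEndFn members) simply
    // skips the lookup on every later request.
    static llvm::Constant *getOrDeclare(llvm::Module &M, llvm::Intrinsic::ID ID,
                                        llvm::Constant *&Slot) {
      if (!Slot)
        Slot = llvm::Intrinsic::getDeclaration(&M, ID);
      return Slot;
    }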
@@ -1510,24 +1602,23 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
     }
   }
 
-  QualType Ty = D.getType();
-
   llvm::Value *DeclPtr;
   // If this is an aggregate or variable sized value, reuse the input pointer.
   if (!Ty->isConstantSizeType() ||
-      CodeGenFunction::hasAggregateLLVMType(Ty)) {
+      !CodeGenFunction::hasScalarEvaluationKind(Ty)) {
     DeclPtr = Arg;
   } else {
     // Otherwise, create a temporary to hold the value.
     llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty),
                                                D.getName() + ".addr");
-    Alloc->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+    CharUnits Align = getContext().getDeclAlign(&D);
+    Alloc->setAlignment(Align.getQuantity());
     DeclPtr = Alloc;
 
     bool doStore = true;
 
     Qualifiers qs = Ty.getQualifiers();
-
+    LValue lv = MakeAddrLValue(DeclPtr, Ty, Align);
     if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
       // We honor __attribute__((ns_consumed)) for types with lifetime.
       // For __strong, it's handled by just skipping the initial retain;
@@ -1548,15 +1639,30 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
       }
 
       if (lt == Qualifiers::OCL_Strong) {
-        if (!isConsumed)
+        if (!isConsumed) {
+          if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+            // use objc_storeStrong(&dest, value) for retaining the
+            // object. But first, store a null into 'dest' because
+            // objc_storeStrong attempts to release its old value.
+            llvm::Value * Null = CGM.EmitNullConstant(D.getType());
+            EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
+            EmitARCStoreStrongCall(lv.getAddress(), Arg, true);
+            doStore = false;
+          }
+          else
           // Don't use objc_retainBlock for block pointers, because we
           // don't want to Block_copy something just because we got it
           // as a parameter.
-          Arg = EmitARCRetainNonBlock(Arg);
+            Arg = EmitARCRetainNonBlock(Arg);
+        }
       } else {
         // Push the cleanup for a consumed parameter.
-        if (isConsumed)
-          EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), Arg);
+        if (isConsumed) {
+          ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
+                                          ? ARCPreciseLifetime : ARCImpreciseLifetime);
+          EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), Arg,
+                                                   precise);
+        }
 
         if (lt == Qualifiers::OCL_Weak) {
           EmitARCInitWeak(DeclPtr, Arg);
@@ -1569,11 +1675,8 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
     }
 
     // Store the initial value into the alloca.
-    if (doStore) {
-      LValue lv = MakeAddrLValue(DeclPtr, Ty,
-                                 getContext().getDeclAlign(&D));
+    if (doStore)
       EmitStoreOfScalar(Arg, lv, /* isInitialization */ true);
-    }
   }
 
   llvm::Value *&DMEntry = LocalDeclMap[&D];
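Note: a recurring change in this diff is replacing the bare 'bool precise' argument of EmitARCRelease with the ARCPreciseLifetime_t enum, so call sites read as ARCPreciseLifetime or ARCImpreciseLifetime rather than an anonymous true/false. A tiny self-contained sketch of the selection the EmitParmDecl hunk performs (the types here are stand-ins; the real enum and attribute live in Clang):

    // arc_precision.cpp - hypothetical sketch, not Clang's actual types.
    enum ARCPreciseLifetime_t { ARCImpreciseLifetime, ARCPreciseLifetime };

    struct ParamDecl { bool hasObjCPreciseLifetimeAttr; };

    // objc_precise_lifetime pins the release to its source position;
    // otherwise ARC may release the value earlier ("imprecise" lifetime).
    ARCPreciseLifetime_t releaseFlavor(const ParamDecl &D) {
      return D.hasObjCPreciseLifetimeAttr ? ARCPreciseLifetime
                                          : ARCImpreciseLifetime;
    }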