author     dim <dim@FreeBSD.org>    2011-05-02 19:39:53 +0000
committer  dim <dim@FreeBSD.org>    2011-05-02 19:39:53 +0000
commit     110eaaceddcec790f7e6a5e3bf1261c9aa1e73ab (patch)
tree       64a10f4c4154739d4a8191d7e1b52ce497f4ebd6 /lib/CodeGen
parent     a0fb00f9837bd0d2e5948f16f6a6b82a7a628f51 (diff)
Vendor import of clang trunk r130700:
http://llvm.org/svn/llvm-project/cfe/trunk@130700
Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/BackendUtil.cpp           |   29
-rw-r--r--  lib/CodeGen/CGBlocks.cpp              |  586
-rw-r--r--  lib/CodeGen/CGBlocks.h                |    4
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp             |  230
-rw-r--r--  lib/CodeGen/CGCXX.cpp                 |  103
-rw-r--r--  lib/CodeGen/CGCXXABI.cpp              |    4
-rw-r--r--  lib/CodeGen/CGCall.cpp                |  164
-rw-r--r--  lib/CodeGen/CGCall.h                  |   26
-rw-r--r--  lib/CodeGen/CGClass.cpp               |  156
-rw-r--r--  lib/CodeGen/CGCleanup.cpp             |   23
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp           |  399
-rw-r--r--  lib/CodeGen/CGDebugInfo.h             |   32
-rw-r--r--  lib/CodeGen/CGDecl.cpp                |  353
-rw-r--r--  lib/CodeGen/CGDeclCXX.cpp             |   48
-rw-r--r--  lib/CodeGen/CGException.cpp           |  108
-rw-r--r--  lib/CodeGen/CGException.h             |    1
-rw-r--r--  lib/CodeGen/CGExpr.cpp                |   99
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp             |  134
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp             |  650
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp         |    9
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp        |  401
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp          |  245
-rw-r--r--  lib/CodeGen/CGObjC.cpp                |  335
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp             | 1416
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp             |  364
-rw-r--r--  lib/CodeGen/CGObjCRuntime.cpp         |  310
-rw-r--r--  lib/CodeGen/CGObjCRuntime.h           |   24
-rw-r--r--  lib/CodeGen/CGRTTI.cpp                |   17
-rw-r--r--  lib/CodeGen/CGRecordLayout.h          |    5
-rw-r--r--  lib/CodeGen/CGRecordLayoutBuilder.cpp |  188
-rw-r--r--  lib/CodeGen/CGStmt.cpp                |  360
-rw-r--r--  lib/CodeGen/CGVTT.cpp                 |   45
-rw-r--r--  lib/CodeGen/CGVTables.cpp             |  539
-rw-r--r--  lib/CodeGen/CGVTables.h               |   26
-rw-r--r--  lib/CodeGen/CMakeLists.txt            |    2
-rw-r--r--  lib/CodeGen/CodeGenAction.cpp         |    2
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp       |  138
-rw-r--r--  lib/CodeGen/CodeGenFunction.h         |   97
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp         |  179
-rw-r--r--  lib/CodeGen/CodeGenModule.h           |   69
-rw-r--r--  lib/CodeGen/CodeGenTBAA.cpp           |    5
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp          |   82
-rw-r--r--  lib/CodeGen/CodeGenTypes.h            |   14
-rw-r--r--  lib/CodeGen/ItaniumCXXABI.cpp         |   55
-rw-r--r--  lib/CodeGen/MicrosoftCXXABI.cpp       |    2
-rw-r--r--  lib/CodeGen/TargetInfo.cpp            |  178
46 files changed, 5208 insertions, 3048 deletions
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index 9897b1b..1264473 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -30,6 +30,7 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegistry.h"
+#include "llvm/Transforms/Instrumentation.h"
using namespace clang;
using namespace llvm;
@@ -108,9 +109,9 @@ void EmitAssemblyHelper::CreatePasses() {
OptLevel = 0;
Inlining = CodeGenOpts.NoInlining;
}
-
+
FunctionPassManager *FPM = getPerFunctionPasses();
-
+
TargetLibraryInfo *TLI =
new TargetLibraryInfo(Triple(TheModule->getTargetTriple()));
if (!CodeGenOpts.SimplifyLibCalls)
@@ -133,8 +134,10 @@ void EmitAssemblyHelper::CreatePasses() {
//
// FIXME: Derive these constants in a principled fashion.
unsigned Threshold = 225;
- if (CodeGenOpts.OptimizeSize)
+ if (CodeGenOpts.OptimizeSize == 1) //-Os
Threshold = 75;
+ else if (CodeGenOpts.OptimizeSize == 2) //-Oz
+ Threshold = 25;
else if (OptLevel > 2)
Threshold = 275;
InliningPass = createFunctionInliningPass(Threshold);
@@ -146,12 +149,19 @@ void EmitAssemblyHelper::CreatePasses() {
}
PassManager *MPM = getPerModulePasses();
-
+
TLI = new TargetLibraryInfo(Triple(TheModule->getTargetTriple()));
if (!CodeGenOpts.SimplifyLibCalls)
TLI->disableAllFunctions();
MPM->add(TLI);
+ if (CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes) {
+ MPM->add(createGCOVProfilerPass(CodeGenOpts.EmitGcovNotes,
+ CodeGenOpts.EmitGcovArcs));
+ if (!CodeGenOpts.DebugInfo)
+ MPM->add(createStripSymbolsPass(true));
+ }
+
// For now we always create per module passes.
llvm::createStandardModulePasses(MPM, OptLevel,
CodeGenOpts.OptimizeSize,
@@ -190,7 +200,7 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
}
// Set float ABI type.
- if (CodeGenOpts.FloatABI == "soft")
+ if (CodeGenOpts.FloatABI == "soft" || CodeGenOpts.FloatABI == "softfp")
llvm::FloatABIType = llvm::FloatABI::Soft;
else if (CodeGenOpts.FloatABI == "hard")
llvm::FloatABIType = llvm::FloatABI::Hard;
@@ -248,6 +258,8 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
}
if (llvm::TimePassesIsEnabled)
BackendArgs.push_back("-time-passes");
+ for (unsigned i = 0, e = CodeGenOpts.BackendOptions.size(); i != e; ++i)
+ BackendArgs.push_back(CodeGenOpts.BackendOptions[i].c_str());
BackendArgs.push_back(0);
llvm::cl::ParseCommandLineOptions(BackendArgs.size() - 1,
const_cast<char **>(&BackendArgs[0]));
@@ -266,6 +278,10 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
if (CodeGenOpts.RelaxAll)
TM->setMCRelaxAll(true);
+ if (CodeGenOpts.SaveTempLabels)
+ TM->setMCSaveTempLabels(true);
+ if (CodeGenOpts.NoDwarf2CFIAsm)
+ TM->setMCUseCFI(false);
// Create the code generator passes.
PassManager *PM = getCodeGenPasses();
@@ -319,6 +335,9 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_ostream *OS) {
return;
}
+ // Before executing passes, print the final values of the LLVM options.
+ cl::PrintOptionValues();
+
// Run passes. For now we do all passes at once, but eventually we
// would like to have the option of streaming code generation.
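[Annotation] The inliner hunk above distinguishes -Os from the new -Oz level when picking an inlining budget. As a minimal standalone sketch of just that mapping (selectInlineThreshold is a hypothetical name, not part of clang):

    // Hypothetical helper mirroring the threshold logic in CreatePasses():
    // OptimizeSize == 1 corresponds to -Os, OptimizeSize == 2 to -Oz.
    static unsigned selectInlineThreshold(unsigned OptimizeSize,
                                          unsigned OptLevel) {
      if (OptimizeSize == 1) return 75;   // -Os: prefer smaller code
      if (OptimizeSize == 2) return 25;   // -Oz: minimize size aggressively
      if (OptLevel > 2)      return 275;  // -O3: inline more eagerly
      return 225;                         // default budget
    }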
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 9587de2..99a69a4 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -27,13 +27,16 @@ using namespace CodeGen;
CGBlockInfo::CGBlockInfo(const BlockExpr *blockExpr, const char *N)
: Name(N), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
- HasCXXObject(false), StructureType(0), Block(blockExpr) {
+ HasCXXObject(false), UsesStret(false), StructureType(0), Block(blockExpr) {
// Skip asm prefix, if any.
if (Name && Name[0] == '\01')
++Name;
}
+// Anchor the vtable to this translation unit.
+CodeGenModule::ByrefHelpers::~ByrefHelpers() {}
+
/// Build the given block as a global block.
static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
const CGBlockInfo &blockInfo,
@@ -104,23 +107,6 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
return llvm::ConstantExpr::getBitCast(global, CGM.getBlockDescriptorType());
}
-static BlockFlags computeBlockFlag(CodeGenModule &CGM,
- const BlockExpr *BE,
- BlockFlags flags) {
- const FunctionType *ftype = BE->getFunctionType();
-
- // This is a bit overboard.
- CallArgList args;
- const CGFunctionInfo &fnInfo =
- CGM.getTypes().getFunctionInfo(ftype->getResultType(), args,
- ftype->getExtInfo());
-
- if (CGM.ReturnTypeUsesSRet(fnInfo))
- flags |= BLOCK_USE_STRET;
-
- return flags;
-}
-
/*
Purely notional variadic template describing the layout of a block.
@@ -536,7 +522,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
BlockFlags flags = BLOCK_HAS_SIGNATURE;
if (blockInfo.NeedsCopyDispose) flags |= BLOCK_HAS_COPY_DISPOSE;
if (blockInfo.HasCXXObject) flags |= BLOCK_HAS_CXX_OBJ;
- flags = computeBlockFlag(CGM, blockInfo.getBlockExpr(), flags);
+ if (blockInfo.UsesStret) flags |= BLOCK_USE_STRET;
// Initialize the block literal.
Builder.CreateStore(isa, Builder.CreateStructGEP(blockAddr, 0, "block.isa"));
@@ -630,7 +616,9 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
ImplicitCastExpr l2r(ImplicitCastExpr::OnStack, type, CK_LValueToRValue,
declRef, VK_RValue);
- EmitAnyExprToMem(&l2r, blockField, /*volatile*/ false, /*init*/ true);
+ EmitExprAsInit(&l2r, variable, blockField,
+ getContext().getDeclAlign(variable),
+ /*captured by init*/ false);
}
// Push a destructor if necessary. The semantics for when this
@@ -734,7 +722,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
// Add the block literal.
QualType VoidPtrTy = getContext().getPointerType(getContext().VoidTy);
CallArgList Args;
- Args.push_back(std::make_pair(RValue::get(BlockLiteral), VoidPtrTy));
+ Args.add(RValue::get(BlockLiteral), VoidPtrTy);
QualType FnType = BPT->getPointeeType();
@@ -745,7 +733,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
// Load the function.
llvm::Value *Func = Builder.CreateLoad(FuncPtr, "tmp");
- const FunctionType *FuncTy = FnType->getAs<FunctionType>();
+ const FunctionType *FuncTy = FnType->castAs<FunctionType>();
QualType ResultType = FuncTy->getResultType();
const CGFunctionInfo &FnInfo =
@@ -834,8 +822,9 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
fields[0] = CGM.getNSConcreteGlobalBlock();
// __flags
- BlockFlags flags = computeBlockFlag(CGM, blockInfo.getBlockExpr(),
- BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE);
+ BlockFlags flags = BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE;
+ if (blockInfo.UsesStret) flags |= BLOCK_USE_STRET;
+
fields[1] = llvm::ConstantInt::get(CGM.IntTy, flags.getBitMask());
// Reserved
@@ -873,7 +862,10 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
const DeclMapTy &ldm) {
const BlockDecl *blockDecl = blockInfo.getBlockDecl();
- DebugInfo = CGM.getDebugInfo();
+ // Check if we should generate debug info for this block function.
+ if (CGM.getModuleDebugInfo())
+ DebugInfo = CGM.getModuleDebugInfo();
+
BlockInfo = &blockInfo;
// Arrange for local static and local extern declarations to appear
@@ -897,12 +889,12 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
ImplicitParamDecl selfDecl(const_cast<BlockDecl*>(blockDecl),
SourceLocation(), II, selfTy);
- args.push_back(std::make_pair(&selfDecl, selfTy));
+ args.push_back(&selfDecl);
// Now add the rest of the parameters.
for (BlockDecl::param_const_iterator i = blockDecl->param_begin(),
e = blockDecl->param_end(); i != e; ++i)
- args.push_back(std::make_pair(*i, (*i)->getType()));
+ args.push_back(*i);
// Create the function declaration.
const FunctionProtoType *fnType =
@@ -910,6 +902,9 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
const CGFunctionInfo &fnInfo =
CGM.getTypes().getFunctionInfo(fnType->getResultType(), args,
fnType->getExtInfo());
+ if (CGM.ReturnTypeUsesSRet(fnInfo))
+ blockInfo.UsesStret = true;
+
const llvm::FunctionType *fnLLVMType =
CGM.getTypes().GetFunctionType(fnInfo, fnType->isVariadic());
@@ -921,8 +916,8 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
CGM.SetInternalFunctionAttributes(blockDecl, fn, fnInfo);
// Begin generating the function.
- StartFunction(blockDecl, fnType->getResultType(), fn, args,
- blockInfo.getBlockExpr()->getBody()->getLocEnd());
+ StartFunction(blockDecl, fnType->getResultType(), fn, fnInfo, args,
+ blockInfo.getBlockExpr()->getBody()->getLocStart());
CurFuncDecl = outerFnDecl; // StartFunction sets this to blockDecl
// Okay. Undo some of what StartFunction did.
@@ -1049,13 +1044,10 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
ASTContext &C = getContext();
FunctionArgList args;
- // FIXME: This leaks
- ImplicitParamDecl *dstDecl =
- ImplicitParamDecl::Create(C, 0, SourceLocation(), 0, C.VoidPtrTy);
- args.push_back(std::make_pair(dstDecl, dstDecl->getType()));
- ImplicitParamDecl *srcDecl =
- ImplicitParamDecl::Create(C, 0, SourceLocation(), 0, C.VoidPtrTy);
- args.push_back(std::make_pair(srcDecl, srcDecl->getType()));
+ ImplicitParamDecl dstDecl(0, SourceLocation(), 0, C.VoidPtrTy);
+ args.push_back(&dstDecl);
+ ImplicitParamDecl srcDecl(0, SourceLocation(), 0, C.VoidPtrTy);
+ args.push_back(&srcDecl);
const CGFunctionInfo &FI =
CGM.getTypes().getFunctionInfo(C.VoidTy, args, FunctionType::ExtInfo());
@@ -1073,20 +1065,21 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
FunctionDecl *FD = FunctionDecl::Create(C,
C.getTranslationUnitDecl(),
+ SourceLocation(),
SourceLocation(), II, C.VoidTy, 0,
SC_Static,
SC_None,
false,
true);
- StartFunction(FD, C.VoidTy, Fn, args, SourceLocation());
+ StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
const llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
- llvm::Value *src = GetAddrOfLocalVar(srcDecl);
+ llvm::Value *src = GetAddrOfLocalVar(&srcDecl);
src = Builder.CreateLoad(src);
src = Builder.CreateBitCast(src, structPtrTy, "block.source");
- llvm::Value *dst = GetAddrOfLocalVar(dstDecl);
+ llvm::Value *dst = GetAddrOfLocalVar(&dstDecl);
dst = Builder.CreateLoad(dst);
dst = Builder.CreateBitCast(dst, structPtrTy, "block.dest");
@@ -1143,10 +1136,8 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
ASTContext &C = getContext();
FunctionArgList args;
- // FIXME: This leaks
- ImplicitParamDecl *srcDecl =
- ImplicitParamDecl::Create(C, 0, SourceLocation(), 0, C.VoidPtrTy);
- args.push_back(std::make_pair(srcDecl, srcDecl->getType()));
+ ImplicitParamDecl srcDecl(0, SourceLocation(), 0, C.VoidPtrTy);
+ args.push_back(&srcDecl);
const CGFunctionInfo &FI =
CGM.getTypes().getFunctionInfo(C.VoidTy, args, FunctionType::ExtInfo());
@@ -1163,15 +1154,16 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
= &CGM.getContext().Idents.get("__destroy_helper_block_");
FunctionDecl *FD = FunctionDecl::Create(C, C.getTranslationUnitDecl(),
+ SourceLocation(),
SourceLocation(), II, C.VoidTy, 0,
SC_Static,
SC_None,
false, true);
- StartFunction(FD, C.VoidTy, Fn, args, SourceLocation());
+ StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
const llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
- llvm::Value *src = GetAddrOfLocalVar(srcDecl);
+ llvm::Value *src = GetAddrOfLocalVar(&srcDecl);
src = Builder.CreateLoad(src);
src = Builder.CreateBitCast(src, structPtrTy, "block");
@@ -1229,99 +1221,158 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
return llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
}
-llvm::Constant *CodeGenFunction::
-GeneratebyrefCopyHelperFunction(const llvm::Type *T, BlockFieldFlags flags,
- const VarDecl *variable) {
- QualType R = getContext().VoidTy;
-
- FunctionArgList Args;
- // FIXME: This leaks
- ImplicitParamDecl *Dst =
- ImplicitParamDecl::Create(getContext(), 0,
- SourceLocation(), 0,
- getContext().getPointerType(getContext().VoidTy));
- Args.push_back(std::make_pair(Dst, Dst->getType()));
-
- // FIXME: This leaks
- ImplicitParamDecl *Src =
- ImplicitParamDecl::Create(getContext(), 0,
- SourceLocation(), 0,
- getContext().getPointerType(getContext().VoidTy));
- Args.push_back(std::make_pair(Src, Src->getType()));
+namespace {
+
+/// Emits the copy/dispose helper functions for a __block object of id type.
+class ObjectByrefHelpers : public CodeGenModule::ByrefHelpers {
+ BlockFieldFlags Flags;
+
+public:
+ ObjectByrefHelpers(CharUnits alignment, BlockFieldFlags flags)
+ : ByrefHelpers(alignment), Flags(flags) {}
+
+ void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
+ llvm::Value *srcField) {
+ destField = CGF.Builder.CreateBitCast(destField, CGF.VoidPtrTy);
+
+ srcField = CGF.Builder.CreateBitCast(srcField, CGF.VoidPtrPtrTy);
+ llvm::Value *srcValue = CGF.Builder.CreateLoad(srcField);
+
+ unsigned flags = (Flags | BLOCK_BYREF_CALLER).getBitMask();
+
+ llvm::Value *flagsVal = llvm::ConstantInt::get(CGF.Int32Ty, flags);
+ llvm::Value *fn = CGF.CGM.getBlockObjectAssign();
+ CGF.Builder.CreateCall3(fn, destField, srcValue, flagsVal);
+ }
+
+ void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+ field = CGF.Builder.CreateBitCast(field, CGF.Int8PtrTy->getPointerTo(0));
+ llvm::Value *value = CGF.Builder.CreateLoad(field);
+
+ CGF.BuildBlockRelease(value, Flags | BLOCK_BYREF_CALLER);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const {
+ id.AddInteger(Flags.getBitMask());
+ }
+};
+
+/// Emits the copy/dispose helpers for a __block variable with a
+/// nontrivial copy constructor or destructor.
+class CXXByrefHelpers : public CodeGenModule::ByrefHelpers {
+ QualType VarType;
+ const Expr *CopyExpr;
+
+public:
+ CXXByrefHelpers(CharUnits alignment, QualType type,
+ const Expr *copyExpr)
+ : ByrefHelpers(alignment), VarType(type), CopyExpr(copyExpr) {}
+
+ bool needsCopy() const { return CopyExpr != 0; }
+ void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
+ llvm::Value *srcField) {
+ if (!CopyExpr) return;
+ CGF.EmitSynthesizedCXXCopyCtor(destField, srcField, CopyExpr);
+ }
+
+ void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+ EHScopeStack::stable_iterator cleanupDepth = CGF.EHStack.stable_begin();
+ CGF.PushDestructorCleanup(VarType, field);
+ CGF.PopCleanupBlocks(cleanupDepth);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const {
+ id.AddPointer(VarType.getCanonicalType().getAsOpaquePtr());
+ }
+};
+} // end anonymous namespace
+
+static llvm::Constant *
+generateByrefCopyHelper(CodeGenFunction &CGF,
+ const llvm::StructType &byrefType,
+ CodeGenModule::ByrefHelpers &byrefInfo) {
+ ASTContext &Context = CGF.getContext();
+
+ QualType R = Context.VoidTy;
+
+ FunctionArgList args;
+ ImplicitParamDecl dst(0, SourceLocation(), 0, Context.VoidPtrTy);
+ args.push_back(&dst);
+
+ ImplicitParamDecl src(0, SourceLocation(), 0, Context.VoidPtrTy);
+ args.push_back(&src);
const CGFunctionInfo &FI =
- CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
+ CGF.CGM.getTypes().getFunctionInfo(R, args, FunctionType::ExtInfo());
- CodeGenTypes &Types = CGM.getTypes();
+ CodeGenTypes &Types = CGF.CGM.getTypes();
const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
// FIXME: We'd like to put these into a mergable by content, with
// internal linkage.
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
- "__Block_byref_object_copy_", &CGM.getModule());
+ "__Block_byref_object_copy_", &CGF.CGM.getModule());
IdentifierInfo *II
- = &CGM.getContext().Idents.get("__Block_byref_object_copy_");
+ = &Context.Idents.get("__Block_byref_object_copy_");
- FunctionDecl *FD = FunctionDecl::Create(getContext(),
- getContext().getTranslationUnitDecl(),
+ FunctionDecl *FD = FunctionDecl::Create(Context,
+ Context.getTranslationUnitDecl(),
+ SourceLocation(),
SourceLocation(), II, R, 0,
SC_Static,
SC_None,
false, true);
- StartFunction(FD, R, Fn, Args, SourceLocation());
-
- // dst->x
- llvm::Value *V = GetAddrOfLocalVar(Dst);
- V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
- V = Builder.CreateLoad(V);
- V = Builder.CreateStructGEP(V, 6, "x");
- llvm::Value *DstObj = V;
-
- // src->x
- V = GetAddrOfLocalVar(Src);
- V = Builder.CreateLoad(V);
- V = Builder.CreateBitCast(V, T);
- V = Builder.CreateStructGEP(V, 6, "x");
-
- if (Expr *copyExpr = getContext().getBlockVarCopyInits(variable)) {
- llvm::Value *SrcObj = V;
- EmitSynthesizedCXXCopyCtor(DstObj, SrcObj, copyExpr);
- } else {
- DstObj = Builder.CreateBitCast(DstObj, VoidPtrTy);
- V = Builder.CreateBitCast(V, VoidPtrPtrTy);
- llvm::Value *SrcObj = Builder.CreateLoad(V);
- flags |= BLOCK_BYREF_CALLER;
- llvm::Value *N = llvm::ConstantInt::get(Int32Ty, flags.getBitMask());
- llvm::Value *F = CGM.getBlockObjectAssign();
- Builder.CreateCall3(F, DstObj, SrcObj, N);
- }
-
- FinishFunction();
+ CGF.StartFunction(FD, R, Fn, FI, args, SourceLocation());
- return llvm::ConstantExpr::getBitCast(Fn, Int8PtrTy);
+ if (byrefInfo.needsCopy()) {
+ const llvm::Type *byrefPtrType = byrefType.getPointerTo(0);
+
+ // dst->x
+ llvm::Value *destField = CGF.GetAddrOfLocalVar(&dst);
+ destField = CGF.Builder.CreateLoad(destField);
+ destField = CGF.Builder.CreateBitCast(destField, byrefPtrType);
+ destField = CGF.Builder.CreateStructGEP(destField, 6, "x");
+
+ // src->x
+ llvm::Value *srcField = CGF.GetAddrOfLocalVar(&src);
+ srcField = CGF.Builder.CreateLoad(srcField);
+ srcField = CGF.Builder.CreateBitCast(srcField, byrefPtrType);
+ srcField = CGF.Builder.CreateStructGEP(srcField, 6, "x");
+
+ byrefInfo.emitCopy(CGF, destField, srcField);
+ }
+
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, CGF.Int8PtrTy);
}
-llvm::Constant *
-CodeGenFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
- BlockFieldFlags flags,
- const VarDecl *variable) {
- QualType R = getContext().VoidTy;
+/// Build the copy helper for a __block variable.
+static llvm::Constant *buildByrefCopyHelper(CodeGenModule &CGM,
+ const llvm::StructType &byrefType,
+ CodeGenModule::ByrefHelpers &info) {
+ CodeGenFunction CGF(CGM);
+ return generateByrefCopyHelper(CGF, byrefType, info);
+}
- FunctionArgList Args;
- // FIXME: This leaks
- ImplicitParamDecl *Src =
- ImplicitParamDecl::Create(getContext(), 0,
- SourceLocation(), 0,
- getContext().getPointerType(getContext().VoidTy));
+/// Generate code for a __block variable's dispose helper.
+static llvm::Constant *
+generateByrefDisposeHelper(CodeGenFunction &CGF,
+ const llvm::StructType &byrefType,
+ CodeGenModule::ByrefHelpers &byrefInfo) {
+ ASTContext &Context = CGF.getContext();
+ QualType R = Context.VoidTy;
- Args.push_back(std::make_pair(Src, Src->getType()));
+ FunctionArgList args;
+ ImplicitParamDecl src(0, SourceLocation(), 0, Context.VoidPtrTy);
+ args.push_back(&src);
const CGFunctionInfo &FI =
- CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
+ CGF.CGM.getTypes().getFunctionInfo(R, args, FunctionType::ExtInfo());
- CodeGenTypes &Types = CGM.getTypes();
+ CodeGenTypes &Types = CGF.CGM.getTypes();
const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
// FIXME: We'd like to put these into a mergable by content, with
@@ -1329,81 +1380,255 @@ CodeGenFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
"__Block_byref_object_dispose_",
- &CGM.getModule());
+ &CGF.CGM.getModule());
IdentifierInfo *II
- = &CGM.getContext().Idents.get("__Block_byref_object_dispose_");
+ = &Context.Idents.get("__Block_byref_object_dispose_");
- FunctionDecl *FD = FunctionDecl::Create(getContext(),
- getContext().getTranslationUnitDecl(),
+ FunctionDecl *FD = FunctionDecl::Create(Context,
+ Context.getTranslationUnitDecl(),
+ SourceLocation(),
SourceLocation(), II, R, 0,
SC_Static,
SC_None,
false, true);
- StartFunction(FD, R, Fn, Args, SourceLocation());
+ CGF.StartFunction(FD, R, Fn, FI, args, SourceLocation());
+
+ if (byrefInfo.needsDispose()) {
+ llvm::Value *V = CGF.GetAddrOfLocalVar(&src);
+ V = CGF.Builder.CreateLoad(V);
+ V = CGF.Builder.CreateBitCast(V, byrefType.getPointerTo(0));
+ V = CGF.Builder.CreateStructGEP(V, 6, "x");
- llvm::Value *V = GetAddrOfLocalVar(Src);
- V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
- V = Builder.CreateLoad(V);
- V = Builder.CreateStructGEP(V, 6, "x");
+ byrefInfo.emitDispose(CGF, V);
+ }
- // If it's not any kind of special object, it must have a destructor
- // or something.
- if (!flags.isSpecialPointer()) {
- EHScopeStack::stable_iterator CleanupDepth = EHStack.stable_begin();
- PushDestructorCleanup(variable->getType(), V);
- PopCleanupBlocks(CleanupDepth);
+ CGF.FinishFunction();
- // Otherwise, call _Block_object_dispose.
- } else {
- V = Builder.CreateBitCast(V, llvm::PointerType::get(Int8PtrTy, 0));
- V = Builder.CreateLoad(V);
+ return llvm::ConstantExpr::getBitCast(Fn, CGF.Int8PtrTy);
+}
- flags |= BLOCK_BYREF_CALLER;
- BuildBlockRelease(V, flags);
+/// Build the dispose helper for a __block variable.
+static llvm::Constant *buildByrefDisposeHelper(CodeGenModule &CGM,
+ const llvm::StructType &byrefType,
+ CodeGenModule::ByrefHelpers &info) {
+ CodeGenFunction CGF(CGM);
+ return generateByrefDisposeHelper(CGF, byrefType, info);
+}
+
+///
+template <class T> static T *buildByrefHelpers(CodeGenModule &CGM,
+ const llvm::StructType &byrefTy,
+ T &byrefInfo) {
+ // Increase the field's alignment to be at least pointer alignment,
+ // since the layout of the byref struct will guarantee at least that.
+ byrefInfo.Alignment = std::max(byrefInfo.Alignment,
+ CharUnits::fromQuantity(CGM.PointerAlignInBytes));
+
+ llvm::FoldingSetNodeID id;
+ byrefInfo.Profile(id);
+
+ void *insertPos;
+ CodeGenModule::ByrefHelpers *node
+ = CGM.ByrefHelpersCache.FindNodeOrInsertPos(id, insertPos);
+ if (node) return static_cast<T*>(node);
+
+ byrefInfo.CopyHelper = buildByrefCopyHelper(CGM, byrefTy, byrefInfo);
+ byrefInfo.DisposeHelper = buildByrefDisposeHelper(CGM, byrefTy, byrefInfo);
+
+ T *copy = new (CGM.getContext()) T(byrefInfo);
+ CGM.ByrefHelpersCache.InsertNode(copy, insertPos);
+ return copy;
+}
+
+CodeGenModule::ByrefHelpers *
+CodeGenFunction::buildByrefHelpers(const llvm::StructType &byrefType,
+ const AutoVarEmission &emission) {
+ const VarDecl &var = *emission.Variable;
+ QualType type = var.getType();
+
+ if (const CXXRecordDecl *record = type->getAsCXXRecordDecl()) {
+ const Expr *copyExpr = CGM.getContext().getBlockVarCopyInits(&var);
+ if (!copyExpr && record->hasTrivialDestructor()) return 0;
+
+ CXXByrefHelpers byrefInfo(emission.Alignment, type, copyExpr);
+ return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
}
- FinishFunction();
+ BlockFieldFlags flags;
+ if (type->isBlockPointerType()) {
+ flags |= BLOCK_FIELD_IS_BLOCK;
+ } else if (CGM.getContext().isObjCNSObjectType(type) ||
+ type->isObjCObjectPointerType()) {
+ flags |= BLOCK_FIELD_IS_OBJECT;
+ } else {
+ return 0;
+ }
- return llvm::ConstantExpr::getBitCast(Fn, Int8PtrTy);
+ if (type.isObjCGCWeak())
+ flags |= BLOCK_FIELD_IS_WEAK;
+
+ ObjectByrefHelpers byrefInfo(emission.Alignment, flags);
+ return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
}
-llvm::Constant *CodeGenModule::BuildbyrefCopyHelper(const llvm::Type *T,
- BlockFieldFlags flags,
- unsigned align,
- const VarDecl *var) {
- // All alignments below pointer alignment are bumped up, as we
- // always have at least that much alignment to begin with.
- if (align < PointerAlignInBytes) align = PointerAlignInBytes;
+unsigned CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const {
+ assert(ByRefValueInfo.count(VD) && "Did not find value!");
- // As an optimization, we only generate a single function of each kind we
- // might need. We need a different one for each alignment and for each
- // setting of flags. We mix Align and flag to get the kind.
- uint64_t Kind = (uint64_t)align*BLOCK_BYREF_CURRENT_MAX + flags.getBitMask();
- llvm::Constant *&Entry = AssignCache[Kind];
- if (!Entry)
- Entry = CodeGenFunction(*this).
- GeneratebyrefCopyHelperFunction(T, flags, var);
- return Entry;
+ return ByRefValueInfo.find(VD)->second.second;
+}
+
+llvm::Value *CodeGenFunction::BuildBlockByrefAddress(llvm::Value *BaseAddr,
+ const VarDecl *V) {
+ llvm::Value *Loc = Builder.CreateStructGEP(BaseAddr, 1, "forwarding");
+ Loc = Builder.CreateLoad(Loc);
+ Loc = Builder.CreateStructGEP(Loc, getByRefValueLLVMField(V),
+ V->getNameAsString());
+ return Loc;
}
-llvm::Constant *CodeGenModule::BuildbyrefDestroyHelper(const llvm::Type *T,
- BlockFieldFlags flags,
- unsigned align,
- const VarDecl *var) {
- // All alignments below pointer alignment are bumped up, as we
- // always have at least that much alignment to begin with.
- if (align < PointerAlignInBytes) align = PointerAlignInBytes;
+/// BuildByRefType - This routine changes a __block variable declared as T x
+/// into:
+///
+/// struct {
+/// void *__isa;
+/// void *__forwarding;
+/// int32_t __flags;
+/// int32_t __size;
+/// void *__copy_helper; // only if needed
+/// void *__destroy_helper; // only if needed
+/// char padding[X]; // only if needed
+/// T x;
+/// } x
+///
+const llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
+ std::pair<const llvm::Type *, unsigned> &Info = ByRefValueInfo[D];
+ if (Info.first)
+ return Info.first;
+
+ QualType Ty = D->getType();
+
+ std::vector<const llvm::Type *> Types;
+
+ llvm::PATypeHolder ByRefTypeHolder = llvm::OpaqueType::get(getLLVMContext());
+
+ // void *__isa;
+ Types.push_back(Int8PtrTy);
+
+ // void *__forwarding;
+ Types.push_back(llvm::PointerType::getUnqual(ByRefTypeHolder));
+
+ // int32_t __flags;
+ Types.push_back(Int32Ty);
+
+ // int32_t __size;
+ Types.push_back(Int32Ty);
+
+ bool HasCopyAndDispose = getContext().BlockRequiresCopying(Ty);
+ if (HasCopyAndDispose) {
+ /// void *__copy_helper;
+ Types.push_back(Int8PtrTy);
+
+ /// void *__destroy_helper;
+ Types.push_back(Int8PtrTy);
+ }
+
+ bool Packed = false;
+ CharUnits Align = getContext().getDeclAlign(D);
+ if (Align > getContext().toCharUnitsFromBits(Target.getPointerAlign(0))) {
+ // We have to insert padding.
+
+ // The struct above has 2 32-bit integers.
+ unsigned CurrentOffsetInBytes = 4 * 2;
+
+ // And either 2 or 4 pointers.
+ CurrentOffsetInBytes += (HasCopyAndDispose ? 4 : 2) *
+ CGM.getTargetData().getTypeAllocSize(Int8PtrTy);
+
+ // Align the offset.
+ unsigned AlignedOffsetInBytes =
+ llvm::RoundUpToAlignment(CurrentOffsetInBytes, Align.getQuantity());
+
+ unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes;
+ if (NumPaddingBytes > 0) {
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext());
+ // FIXME: We need a sema error for alignment larger than the minimum of
+ // the maximal stack alignment and the alignment of malloc on the system.
+ if (NumPaddingBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumPaddingBytes);
+
+ Types.push_back(Ty);
+
+ // We want a packed struct.
+ Packed = true;
+ }
+ }
+
+ // T x;
+ Types.push_back(ConvertTypeForMem(Ty));
+
+ const llvm::Type *T = llvm::StructType::get(getLLVMContext(), Types, Packed);
+
+ cast<llvm::OpaqueType>(ByRefTypeHolder.get())->refineAbstractTypeTo(T);
+ CGM.getModule().addTypeName("struct.__block_byref_" + D->getNameAsString(),
+ ByRefTypeHolder.get());
- // As an optimization, we only generate a single function of each kind we
- // might need. We need a different one for each alignment and for each
- // setting of flags. We mix Align and flag to get the kind.
- uint64_t Kind = (uint64_t)align*BLOCK_BYREF_CURRENT_MAX + flags.getBitMask();
- llvm::Constant *&Entry = DestroyCache[Kind];
- if (!Entry)
- Entry = CodeGenFunction(*this).
- GeneratebyrefDestroyHelperFunction(T, flags, var);
- return Entry;
+ Info.first = ByRefTypeHolder.get();
+
+ Info.second = Types.size() - 1;
+
+ return Info.first;
+}
+
+/// Initialize the structural components of a __block variable, i.e.
+/// everything but the actual object.
+void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
+ // Find the address of the local.
+ llvm::Value *addr = emission.Address;
+
+ // That's an alloca of the byref structure type.
+ const llvm::StructType *byrefType = cast<llvm::StructType>(
+ cast<llvm::PointerType>(addr->getType())->getElementType());
+
+ // Build the byref helpers if necessary. This is null if we don't need any.
+ CodeGenModule::ByrefHelpers *helpers =
+ buildByrefHelpers(*byrefType, emission);
+
+ const VarDecl &D = *emission.Variable;
+ QualType type = D.getType();
+
+ llvm::Value *V;
+
+ // Initialize the 'isa', which is just 0 or 1.
+ int isa = 0;
+ if (type.isObjCGCWeak())
+ isa = 1;
+ V = Builder.CreateIntToPtr(Builder.getInt32(isa), Int8PtrTy, "isa");
+ Builder.CreateStore(V, Builder.CreateStructGEP(addr, 0, "byref.isa"));
+
+ // Store the address of the variable into its own forwarding pointer.
+ Builder.CreateStore(addr,
+ Builder.CreateStructGEP(addr, 1, "byref.forwarding"));
+
+ // Blocks ABI:
+ // c) the flags field is set to either 0 if no helper functions are
+ // needed or BLOCK_HAS_COPY_DISPOSE if they are,
+ BlockFlags flags;
+ if (helpers) flags |= BLOCK_HAS_COPY_DISPOSE;
+ Builder.CreateStore(llvm::ConstantInt::get(IntTy, flags.getBitMask()),
+ Builder.CreateStructGEP(addr, 2, "byref.flags"));
+
+ CharUnits byrefSize = CGM.GetTargetTypeStoreSize(byrefType);
+ V = llvm::ConstantInt::get(IntTy, byrefSize.getQuantity());
+ Builder.CreateStore(V, Builder.CreateStructGEP(addr, 3, "byref.size"));
+
+ if (helpers) {
+ llvm::Value *copy_helper = Builder.CreateStructGEP(addr, 4);
+ Builder.CreateStore(helpers->CopyHelper, copy_helper);
+
+ llvm::Value *destroy_helper = Builder.CreateStructGEP(addr, 5);
+ Builder.CreateStore(helpers->DisposeHelper, destroy_helper);
+ }
}
void CodeGenFunction::BuildBlockRelease(llvm::Value *V, BlockFieldFlags flags) {
@@ -1413,3 +1638,26 @@ void CodeGenFunction::BuildBlockRelease(llvm::Value *V, BlockFieldFlags flags) {
N = llvm::ConstantInt::get(Int32Ty, flags.getBitMask());
Builder.CreateCall2(F, V, N);
}
+
+namespace {
+ struct CallBlockRelease : EHScopeStack::Cleanup {
+ llvm::Value *Addr;
+ CallBlockRelease(llvm::Value *Addr) : Addr(Addr) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ CGF.BuildBlockRelease(Addr, BLOCK_FIELD_IS_BYREF);
+ }
+ };
+}
+
+/// Enter a cleanup to destroy a __block variable. Note that this
+/// cleanup should be a no-op if the variable hasn't left the stack
+/// yet; if a cleanup is required for the variable itself, that needs
+/// to be done externally.
+void CodeGenFunction::enterByrefCleanup(const AutoVarEmission &emission) {
+ // We don't enter this cleanup if we're in pure-GC mode.
+ if (CGM.getLangOptions().getGCMode() == LangOptions::GCOnly)
+ return;
+
+ EHStack.pushCleanup<CallBlockRelease>(NormalAndEHCleanup, emission.Address);
+}
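[Annotation] The BuildByRefType comment in the hunk above documents the structure that a __block variable declared as "T x" is rewritten into. Spelled out as a plain struct for reference (field order and optionality exactly as in that comment; T and the padding length X are per-variable placeholders):

    struct __block_byref_x {
      void *__isa;                           // 0, or 1 for __weak under GC
      struct __block_byref_x *__forwarding;  // initially points at this struct
      int32_t __flags;                       // 0 or BLOCK_HAS_COPY_DISPOSE
      int32_t __size;                        // total size of the struct
      void *__copy_helper;                   // only if helpers are needed
      void *__destroy_helper;                // only if helpers are needed
      char padding[X];                       // only if x is over-aligned
      T x;                                   // the variable itself
    };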
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 0bc8bca..9bd18e5 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -173,6 +173,10 @@ public:
/// need to be run even in GC mode.
bool HasCXXObject : 1;
+ /// UsesStret : True if the block uses an stret return. Mutable
+ /// because it gets set later in the block-creation process.
+ mutable bool UsesStret : 1;
+
const llvm::StructType *StructureType;
const BlockExpr *Block;
CharUnits BlockSize;
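[Annotation] UsesStret, added above, records whether the block's invoke function returns its result indirectly through a hidden pointer (the sret convention). A hypothetical illustration, on a target where Big is too large to return in registers:

    struct Big { int data[8]; };
    struct Big (^blk)(void) = ^{ struct Big b = {{0}}; return b; };
    // The invoke function takes a hidden 'struct Big *' sret argument,
    // so the block literal's flags must include BLOCK_USE_STRET.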
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 5eeb605..7a0c8da 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -176,7 +176,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
unsigned BuiltinID, const CallExpr *E) {
// See if we can constant fold this builtin. If so, don't emit it at all.
Expr::EvalResult Result;
- if (E->Evaluate(Result, CGM.getContext())) {
+ if (E->Evaluate(Result, CGM.getContext()) &&
+ !Result.hasSideEffects()) {
if (Result.Val.isInt())
return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
Result.Val.getInt()));
@@ -312,9 +313,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
}
case Builtin::BI__builtin_expect: {
// FIXME: pass expect through to LLVM
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
if (E->getArg(1)->HasSideEffects(getContext()))
(void)EmitScalarExpr(E->getArg(1));
- return RValue::get(EmitScalarExpr(E->getArg(0)));
+ return RValue::get(ArgValue);
}
case Builtin::BI__builtin_bswap32:
case Builtin::BI__builtin_bswap64: {
@@ -473,7 +475,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
Builder.SetInsertPoint(End);
PHINode *Result =
- Builder.CreatePHI(ConvertType(E->getArg(0)->getType()),
+ Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
"fpclassify_result");
// if (V==0) return FP_ZERO
@@ -543,6 +545,22 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Address);
}
+ case Builtin::BI__builtin___memcpy_chk: {
+ // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
+ if (!E->getArg(2)->isEvaluatable(CGM.getContext()) ||
+ !E->getArg(3)->isEvaluatable(CGM.getContext()))
+ break;
+ llvm::APSInt Size = E->getArg(2)->EvaluateAsInt(CGM.getContext());
+ llvm::APSInt DstSize = E->getArg(3)->EvaluateAsInt(CGM.getContext());
+ if (Size.ugt(DstSize))
+ break;
+ Value *Dest = EmitScalarExpr(E->getArg(0));
+ Value *Src = EmitScalarExpr(E->getArg(1));
+ Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
+ Builder.CreateMemCpy(Dest, Src, SizeVal, 1, false);
+ return RValue::get(Dest);
+ }
+
case Builtin::BI__builtin_objc_memmove_collectable: {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *SrcAddr = EmitScalarExpr(E->getArg(1));
@@ -551,7 +569,23 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Address, SrcAddr, SizeVal);
return RValue::get(Address);
}
-
+
+ case Builtin::BI__builtin___memmove_chk: {
+ // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
+ if (!E->getArg(2)->isEvaluatable(CGM.getContext()) ||
+ !E->getArg(3)->isEvaluatable(CGM.getContext()))
+ break;
+ llvm::APSInt Size = E->getArg(2)->EvaluateAsInt(CGM.getContext());
+ llvm::APSInt DstSize = E->getArg(3)->EvaluateAsInt(CGM.getContext());
+ if (Size.ugt(DstSize))
+ break;
+ Value *Dest = EmitScalarExpr(E->getArg(0));
+ Value *Src = EmitScalarExpr(E->getArg(1));
+ Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
+ Builder.CreateMemMove(Dest, Src, SizeVal, 1, false);
+ return RValue::get(Dest);
+ }
+
case Builtin::BImemmove:
case Builtin::BI__builtin_memmove: {
Value *Address = EmitScalarExpr(E->getArg(0));
@@ -569,6 +603,23 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Builder.CreateMemSet(Address, ByteVal, SizeVal, 1, false);
return RValue::get(Address);
}
+ case Builtin::BI__builtin___memset_chk: {
+ // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
+ if (!E->getArg(2)->isEvaluatable(CGM.getContext()) ||
+ !E->getArg(3)->isEvaluatable(CGM.getContext()))
+ break;
+ llvm::APSInt Size = E->getArg(2)->EvaluateAsInt(CGM.getContext());
+ llvm::APSInt DstSize = E->getArg(3)->EvaluateAsInt(CGM.getContext());
+ if (Size.ugt(DstSize))
+ break;
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
+ Builder.getInt8Ty());
+ Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
+ Builder.CreateMemSet(Address, ByteVal, SizeVal, 1, false);
+
+ return RValue::get(Address);
+ }
case Builtin::BI__builtin_dwarf_cfa: {
// The offset in bytes from the first argument to the CFA.
//
@@ -721,6 +772,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_bool_compare_and_swap:
case Builtin::BI__sync_lock_test_and_set:
case Builtin::BI__sync_lock_release:
+ case Builtin::BI__sync_swap:
assert(0 && "Shouldn't make it through sema");
case Builtin::BI__sync_fetch_and_add_1:
case Builtin::BI__sync_fetch_and_add_2:
@@ -860,6 +912,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Result);
}
+ case Builtin::BI__sync_swap_1:
+ case Builtin::BI__sync_swap_2:
+ case Builtin::BI__sync_swap_4:
+ case Builtin::BI__sync_swap_8:
+ case Builtin::BI__sync_swap_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);
+
case Builtin::BI__sync_lock_test_and_set_1:
case Builtin::BI__sync_lock_test_and_set_2:
case Builtin::BI__sync_lock_test_and_set_4:
@@ -948,8 +1007,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
return EmitCall(E->getCallee()->getType(),
CGM.getBuiltinLibFunction(FD, BuiltinID),
- ReturnValueSlot(),
- E->arg_begin(), E->arg_end());
+ ReturnValueSlot(), E->arg_begin(), E->arg_end(), FD);
// See if we have a target specific intrinsic.
const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
@@ -1116,13 +1174,16 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
if (BuiltinID == ARM::BI__clear_cache) {
const FunctionDecl *FD = E->getDirectCallee();
- Value *a = EmitScalarExpr(E->getArg(0));
- Value *b = EmitScalarExpr(E->getArg(1));
+ // Oddly people write this call without args on occasion and gcc accepts
+ // it - it's also marked as varargs in the description file.
+ llvm::SmallVector<Value*, 2> Ops;
+ for (unsigned i = 0; i < E->getNumArgs(); i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
const llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
const llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
llvm::StringRef Name = FD->getName();
- return Builder.CreateCall2(CGM.CreateRuntimeFunction(FTy, Name),
- a, b);
+ return Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
+ Ops.begin(), Ops.end());
}
llvm::SmallVector<Value*, 4> Ops;
@@ -1462,9 +1523,9 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, &Ty, 1),
Ops, "vmul");
case ARM::BI__builtin_neon_vmull_v:
- assert(poly && "vmull builtin only supported for polynomial types");
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmullp, &Ty, 1),
- Ops, "vmull");
+ Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
+ Int = poly ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmull");
case ARM::BI__builtin_neon_vpadal_v:
case ARM::BI__builtin_neon_vpadalq_v: {
Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
@@ -2082,6 +2143,149 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
+ case X86::BI__builtin_ia32_loaddqu: {
+ const llvm::Type *VecTy = ConvertType(E->getType());
+ const llvm::Type *IntTy = llvm::IntegerType::get(getLLVMContext(), 128);
+
+ Value *BC = Builder.CreateBitCast(Ops[0],
+ llvm::PointerType::getUnqual(IntTy),
+ "cast");
+ LoadInst *LI = Builder.CreateLoad(BC);
+ LI->setAlignment(1); // Unaligned load.
+ return Builder.CreateBitCast(LI, VecTy, "loadu.cast");
+ }
+ // 3DNow!
+ case X86::BI__builtin_ia32_pavgusb:
+ case X86::BI__builtin_ia32_pf2id:
+ case X86::BI__builtin_ia32_pfacc:
+ case X86::BI__builtin_ia32_pfadd:
+ case X86::BI__builtin_ia32_pfcmpeq:
+ case X86::BI__builtin_ia32_pfcmpge:
+ case X86::BI__builtin_ia32_pfcmpgt:
+ case X86::BI__builtin_ia32_pfmax:
+ case X86::BI__builtin_ia32_pfmin:
+ case X86::BI__builtin_ia32_pfmul:
+ case X86::BI__builtin_ia32_pfrcp:
+ case X86::BI__builtin_ia32_pfrcpit1:
+ case X86::BI__builtin_ia32_pfrcpit2:
+ case X86::BI__builtin_ia32_pfrsqrt:
+ case X86::BI__builtin_ia32_pfrsqit1:
+ case X86::BI__builtin_ia32_pfrsqrtit1:
+ case X86::BI__builtin_ia32_pfsub:
+ case X86::BI__builtin_ia32_pfsubr:
+ case X86::BI__builtin_ia32_pi2fd:
+ case X86::BI__builtin_ia32_pmulhrw:
+ case X86::BI__builtin_ia32_pf2iw:
+ case X86::BI__builtin_ia32_pfnacc:
+ case X86::BI__builtin_ia32_pfpnacc:
+ case X86::BI__builtin_ia32_pi2fw:
+ case X86::BI__builtin_ia32_pswapdsf:
+ case X86::BI__builtin_ia32_pswapdsi: {
+ const char *name = 0;
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+ switch(BuiltinID) {
+ case X86::BI__builtin_ia32_pavgusb:
+ name = "pavgusb";
+ ID = Intrinsic::x86_3dnow_pavgusb;
+ break;
+ case X86::BI__builtin_ia32_pf2id:
+ name = "pf2id";
+ ID = Intrinsic::x86_3dnow_pf2id;
+ break;
+ case X86::BI__builtin_ia32_pfacc:
+ name = "pfacc";
+ ID = Intrinsic::x86_3dnow_pfacc;
+ break;
+ case X86::BI__builtin_ia32_pfadd:
+ name = "pfadd";
+ ID = Intrinsic::x86_3dnow_pfadd;
+ break;
+ case X86::BI__builtin_ia32_pfcmpeq:
+ name = "pfcmpeq";
+ ID = Intrinsic::x86_3dnow_pfcmpeq;
+ break;
+ case X86::BI__builtin_ia32_pfcmpge:
+ name = "pfcmpge";
+ ID = Intrinsic::x86_3dnow_pfcmpge;
+ break;
+ case X86::BI__builtin_ia32_pfcmpgt:
+ name = "pfcmpgt";
+ ID = Intrinsic::x86_3dnow_pfcmpgt;
+ break;
+ case X86::BI__builtin_ia32_pfmax:
+ name = "pfmax";
+ ID = Intrinsic::x86_3dnow_pfmax;
+ break;
+ case X86::BI__builtin_ia32_pfmin:
+ name = "pfmin";
+ ID = Intrinsic::x86_3dnow_pfmin;
+ break;
+ case X86::BI__builtin_ia32_pfmul:
+ name = "pfmul";
+ ID = Intrinsic::x86_3dnow_pfmul;
+ break;
+ case X86::BI__builtin_ia32_pfrcp:
+ name = "pfrcp";
+ ID = Intrinsic::x86_3dnow_pfrcp;
+ break;
+ case X86::BI__builtin_ia32_pfrcpit1:
+ name = "pfrcpit1";
+ ID = Intrinsic::x86_3dnow_pfrcpit1;
+ break;
+ case X86::BI__builtin_ia32_pfrcpit2:
+ name = "pfrcpit2";
+ ID = Intrinsic::x86_3dnow_pfrcpit2;
+ break;
+ case X86::BI__builtin_ia32_pfrsqrt:
+ name = "pfrsqrt";
+ ID = Intrinsic::x86_3dnow_pfrsqrt;
+ break;
+ case X86::BI__builtin_ia32_pfrsqit1:
+ case X86::BI__builtin_ia32_pfrsqrtit1:
+ name = "pfrsqit1";
+ ID = Intrinsic::x86_3dnow_pfrsqit1;
+ break;
+ case X86::BI__builtin_ia32_pfsub:
+ name = "pfsub";
+ ID = Intrinsic::x86_3dnow_pfsub;
+ break;
+ case X86::BI__builtin_ia32_pfsubr:
+ name = "pfsubr";
+ ID = Intrinsic::x86_3dnow_pfsubr;
+ break;
+ case X86::BI__builtin_ia32_pi2fd:
+ name = "pi2fd";
+ ID = Intrinsic::x86_3dnow_pi2fd;
+ break;
+ case X86::BI__builtin_ia32_pmulhrw:
+ name = "pmulhrw";
+ ID = Intrinsic::x86_3dnow_pmulhrw;
+ break;
+ case X86::BI__builtin_ia32_pf2iw:
+ name = "pf2iw";
+ ID = Intrinsic::x86_3dnowa_pf2iw;
+ break;
+ case X86::BI__builtin_ia32_pfnacc:
+ name = "pfnacc";
+ ID = Intrinsic::x86_3dnowa_pfnacc;
+ break;
+ case X86::BI__builtin_ia32_pfpnacc:
+ name = "pfpnacc";
+ ID = Intrinsic::x86_3dnowa_pfpnacc;
+ break;
+ case X86::BI__builtin_ia32_pi2fw:
+ name = "pi2fw";
+ ID = Intrinsic::x86_3dnowa_pi2fw;
+ break;
+ case X86::BI__builtin_ia32_pswapdsf:
+ case X86::BI__builtin_ia32_pswapdsi:
+ name = "pswapd";
+ ID = Intrinsic::x86_3dnowa_pswapd;
+ break;
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ }
}
}
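[Annotation] The __builtin___memcpy_chk, __builtin___memmove_chk, and __builtin___memset_chk cases above fold the checked call into a plain intrinsic only when both size operands are compile-time constants and the access provably fits the destination. A hypothetical caller-side illustration of when the fold fires:

    void fill8(const char *src) {
      char buf[16];
      // 8 and __builtin_object_size(buf, 0) == 16 both fold to constants,
      // and 8 <= 16, so clang emits llvm.memcpy instead of a __memcpy_chk call.
      __builtin___memcpy_chk(buf, src, 8, __builtin_object_size(buf, 0));
    }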
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index 7ffc6e7..184147c 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -194,39 +194,44 @@ void CodeGenModule::EmitCXXConstructors(const CXXConstructorDecl *D) {
EmitGlobal(GlobalDecl(D, Ctor_Base));
}
-void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *D,
- CXXCtorType Type) {
+void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *ctor,
+ CXXCtorType ctorType) {
// The complete constructor is equivalent to the base constructor
// for classes with no virtual bases. Try to emit it as an alias.
- if (Type == Ctor_Complete &&
- !D->getParent()->getNumVBases() &&
- !TryEmitDefinitionAsAlias(GlobalDecl(D, Ctor_Complete),
- GlobalDecl(D, Ctor_Base)))
+ if (ctorType == Ctor_Complete &&
+ !ctor->getParent()->getNumVBases() &&
+ !TryEmitDefinitionAsAlias(GlobalDecl(ctor, Ctor_Complete),
+ GlobalDecl(ctor, Ctor_Base)))
return;
- llvm::Function *Fn = cast<llvm::Function>(GetAddrOfCXXConstructor(D, Type));
- setFunctionLinkage(D, Fn);
+ const CGFunctionInfo &fnInfo = getTypes().getFunctionInfo(ctor, ctorType);
- CodeGenFunction(*this).GenerateCode(GlobalDecl(D, Type), Fn);
+ llvm::Function *fn =
+ cast<llvm::Function>(GetAddrOfCXXConstructor(ctor, ctorType, &fnInfo));
+ setFunctionLinkage(ctor, fn);
- SetFunctionDefinitionAttributes(D, Fn);
- SetLLVMFunctionAttributesForDefinition(D, Fn);
+ CodeGenFunction(*this).GenerateCode(GlobalDecl(ctor, ctorType), fn, fnInfo);
+
+ SetFunctionDefinitionAttributes(ctor, fn);
+ SetLLVMFunctionAttributesForDefinition(ctor, fn);
}
llvm::GlobalValue *
-CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
- CXXCtorType Type) {
- GlobalDecl GD(D, Type);
+CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *ctor,
+ CXXCtorType ctorType,
+ const CGFunctionInfo *fnInfo) {
+ GlobalDecl GD(ctor, ctorType);
- llvm::StringRef Name = getMangledName(GD);
- if (llvm::GlobalValue *V = GetGlobalValue(Name))
- return V;
-
- const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>();
- const llvm::FunctionType *FTy =
- getTypes().GetFunctionType(getTypes().getFunctionInfo(D, Type),
- FPT->isVariadic());
- return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FTy, GD,
+ llvm::StringRef name = getMangledName(GD);
+ if (llvm::GlobalValue *existing = GetGlobalValue(name))
+ return existing;
+
+ if (!fnInfo) fnInfo = &getTypes().getFunctionInfo(ctor, ctorType);
+
+ const FunctionProtoType *proto = ctor->getType()->castAs<FunctionProtoType>();
+ const llvm::FunctionType *fnType =
+ getTypes().GetFunctionType(*fnInfo, proto->isVariadic());
+ return cast<llvm::Function>(GetOrCreateLLVMFunction(name, fnType, GD,
/*ForVTable=*/false));
}
@@ -246,45 +251,51 @@ void CodeGenModule::EmitCXXDestructors(const CXXDestructorDecl *D) {
EmitGlobal(GlobalDecl(D, Dtor_Base));
}
-void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *D,
- CXXDtorType Type) {
+void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *dtor,
+ CXXDtorType dtorType) {
// The complete destructor is equivalent to the base destructor for
// classes with no virtual bases, so try to emit it as an alias.
- if (Type == Dtor_Complete &&
- !D->getParent()->getNumVBases() &&
- !TryEmitDefinitionAsAlias(GlobalDecl(D, Dtor_Complete),
- GlobalDecl(D, Dtor_Base)))
+ if (dtorType == Dtor_Complete &&
+ !dtor->getParent()->getNumVBases() &&
+ !TryEmitDefinitionAsAlias(GlobalDecl(dtor, Dtor_Complete),
+ GlobalDecl(dtor, Dtor_Base)))
return;
// The base destructor is equivalent to the base destructor of its
// base class if there is exactly one non-virtual base class with a
// non-trivial destructor, there are no fields with a non-trivial
// destructor, and the body of the destructor is trivial.
- if (Type == Dtor_Base && !TryEmitBaseDestructorAsAlias(D))
+ if (dtorType == Dtor_Base && !TryEmitBaseDestructorAsAlias(dtor))
return;
- llvm::Function *Fn = cast<llvm::Function>(GetAddrOfCXXDestructor(D, Type));
- setFunctionLinkage(D, Fn);
+ const CGFunctionInfo &fnInfo = getTypes().getFunctionInfo(dtor, dtorType);
- CodeGenFunction(*this).GenerateCode(GlobalDecl(D, Type), Fn);
+ llvm::Function *fn =
+ cast<llvm::Function>(GetAddrOfCXXDestructor(dtor, dtorType, &fnInfo));
+ setFunctionLinkage(dtor, fn);
- SetFunctionDefinitionAttributes(D, Fn);
- SetLLVMFunctionAttributesForDefinition(D, Fn);
+ CodeGenFunction(*this).GenerateCode(GlobalDecl(dtor, dtorType), fn, fnInfo);
+
+ SetFunctionDefinitionAttributes(dtor, fn);
+ SetLLVMFunctionAttributesForDefinition(dtor, fn);
}
llvm::GlobalValue *
-CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
- CXXDtorType Type) {
- GlobalDecl GD(D, Type);
+CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *dtor,
+ CXXDtorType dtorType,
+ const CGFunctionInfo *fnInfo) {
+ GlobalDecl GD(dtor, dtorType);
+
+ llvm::StringRef name = getMangledName(GD);
+ if (llvm::GlobalValue *existing = GetGlobalValue(name))
+ return existing;
- llvm::StringRef Name = getMangledName(GD);
- if (llvm::GlobalValue *V = GetGlobalValue(Name))
- return V;
+ if (!fnInfo) fnInfo = &getTypes().getFunctionInfo(dtor, dtorType);
- const llvm::FunctionType *FTy =
- getTypes().GetFunctionType(getTypes().getFunctionInfo(D, Type), false);
+ const llvm::FunctionType *fnType =
+ getTypes().GetFunctionType(*fnInfo, false);
- return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FTy, GD,
+ return cast<llvm::Function>(GetOrCreateLLVMFunction(name, fnType, GD,
/*ForVTable=*/false));
}
@@ -334,7 +345,7 @@ CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
MD = MD->getCanonicalDecl();
uint64_t VTableIndex = CGM.getVTables().getMethodVTableIndex(MD);
uint64_t AddressPoint =
- CGM.getVTables().getAddressPoint(BaseSubobject(RD, 0), RD);
+ CGM.getVTables().getAddressPoint(BaseSubobject(RD, CharUnits::Zero()), RD);
VTableIndex += AddressPoint;
llvm::Value *VFuncPtr =
Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
@@ -369,7 +380,7 @@ CodeGenFunction::BuildAppleKextVirtualDestructorCall(
uint64_t VTableIndex =
CGM.getVTables().getMethodVTableIndex(GlobalDecl(DD, Type));
uint64_t AddressPoint =
- CGM.getVTables().getAddressPoint(BaseSubobject(RD, 0), RD);
+ CGM.getVTables().getAddressPoint(BaseSubobject(RD, CharUnits::Zero()), RD);
VTableIndex += AddressPoint;
llvm::Value *VFuncPtr =
Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
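[Annotation] EmitCXXConstructor above preserves the existing alias optimization: for a class with no virtual bases, the complete-object constructor (C1 in the Itanium ABI) is identical to the base-object constructor (C2), so C1 can be emitted as an alias of C2. A minimal illustration of when the two differ:

    struct A { A(); };
    struct B : A { B(); };          // no virtual bases: B's C1 can alias its C2
    struct C : virtual A { C(); };  // virtual base: C1 must also construct A,
                                    // so it cannot alias C2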
diff --git a/lib/CodeGen/CGCXXABI.cpp b/lib/CodeGen/CGCXXABI.cpp
index 8373b66..92f1c63 100644
--- a/lib/CodeGen/CGCXXABI.cpp
+++ b/lib/CodeGen/CGCXXABI.cpp
@@ -115,7 +115,7 @@ bool CGCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
return true;
}
-void CGCXXABI::BuildThisParam(CodeGenFunction &CGF, FunctionArgList &Params) {
+void CGCXXABI::BuildThisParam(CodeGenFunction &CGF, FunctionArgList &params) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
// FIXME: I'm not entirely sure I like using a fake decl just for code
@@ -124,7 +124,7 @@ void CGCXXABI::BuildThisParam(CodeGenFunction &CGF, FunctionArgList &Params) {
= ImplicitParamDecl::Create(CGM.getContext(), 0, MD->getLocation(),
&CGM.getContext().Idents.get("this"),
MD->getThisType(CGM.getContext()));
- Params.push_back(std::make_pair(ThisDecl, ThisDecl->getType()));
+ params.push_back(ThisDecl);
getThisDecl(CGF) = ThisDecl;
}
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index ae84b61..a765f0f 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -36,6 +36,8 @@ static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
+ case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
+ case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
// TODO: add support for CC_X86Pascal to llvm
}
}
@@ -104,6 +106,9 @@ static CallingConv getCallingConventionForDecl(const Decl *D) {
if (D->hasAttr<PascalAttr>())
return CC_X86Pascal;
+ if (PcsAttr *PCS = D->getAttr<PcsAttr>())
+ return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
+
return CC_C;
}
@@ -188,6 +193,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
ArgTys,
FunctionType::ExtInfo(
/*NoReturn*/ false,
+ /*HasRegParm*/ false,
/*RegParm*/ 0,
getCallingConventionForDecl(MD)));
}
@@ -212,7 +218,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
llvm::SmallVector<CanQualType, 16> ArgTys;
for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i)
- ArgTys.push_back(Context.getCanonicalParamType(i->second));
+ ArgTys.push_back(Context.getCanonicalParamType(i->Ty));
return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}
@@ -223,10 +229,15 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
llvm::SmallVector<CanQualType, 16> ArgTys;
for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i)
- ArgTys.push_back(Context.getCanonicalParamType(i->second));
+ ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}
+const CGFunctionInfo &CodeGenTypes::getNullaryFunctionInfo() {
+ llvm::SmallVector<CanQualType, 1> args;
+ return getFunctionInfo(getContext().VoidTy, args, FunctionType::ExtInfo());
+}
+
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
const llvm::SmallVectorImpl<CanQualType> &ArgTys,
const FunctionType::ExtInfo &Info,
@@ -250,7 +261,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
return *FI;
// Construct the function info.
- FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(), ResTy,
+ FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getHasRegParm(), Info.getRegParm(), ResTy,
ArgTys.data(), ArgTys.size());
FunctionInfos.InsertNode(FI, InsertPos);
@@ -279,13 +290,13 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
}
CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
- bool _NoReturn, unsigned _RegParm,
+ bool _NoReturn, bool _HasRegParm, unsigned _RegParm,
CanQualType ResTy,
const CanQualType *ArgTys,
unsigned NumArgTys)
: CallingConvention(_CallingConvention),
EffectiveCallingConvention(_CallingConvention),
- NoReturn(_NoReturn), RegParm(_RegParm)
+ NoReturn(_NoReturn), HasRegParm(_HasRegParm), RegParm(_RegParm)
{
NumArgs = NumArgTys;
@@ -622,7 +633,8 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
ResultType = llvm::Type::getVoidTy(getLLVMContext());
const llvm::Type *STy = ConvertType(RetTy, IsRecursive);
- ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
+ unsigned AS = Context.getTargetAddressSpace(RetTy);
+ ArgTys.push_back(llvm::PointerType::get(STy, AS));
break;
}
@@ -704,7 +716,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
FuncAttrs |= llvm::Attribute::NoUnwind;
else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
- if (FPT && FPT->hasEmptyExceptionSpec())
+ if (FPT && FPT->isNothrow(getContext()))
FuncAttrs |= llvm::Attribute::NoUnwind;
}
@@ -756,8 +768,10 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
// FIXME: RegParm should be reduced in case of global register variable.
- signed RegParm = FI.getRegParm();
- if (!RegParm)
+ signed RegParm;
+ if (FI.getHasRegParm())
+ RegParm = FI.getRegParm();
+ else
RegParm = CodeGenOpts.NumRegisterParameters;
unsigned PointerWidth = getContext().Target.getPointerWidth(0);
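Tracking HasRegParm separately means an explicit attribute now always wins over the -mregparm driver default; previously an explicit regparm(0) fell through the old `if (!RegParm)` test and picked up NumRegisterParameters. A hedged sketch of the distinction (function names are made up):

    void by_default(int a, int b);                           // no attribute: -mregparm=N applies
    void two_regs(int a, int b) __attribute__((regparm(2))); // HasRegParm: exactly 2
    void no_regs(int a, int b) __attribute__((regparm(0)));  // HasRegParm: exactly 0, even under -mregparm
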
@@ -826,6 +840,26 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}
+/// An argument came in as a promoted argument; demote it back to its
+/// declared type.
+static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
+ const VarDecl *var,
+ llvm::Value *value) {
+ const llvm::Type *varType = CGF.ConvertType(var->getType());
+
+ // This can happen with promotions that actually don't change the
+ // underlying type, like the enum promotions.
+ if (value->getType() == varType) return value;
+
+ assert((varType->isIntegerTy() || varType->isFloatingPointTy())
+ && "unexpected promotion type");
+
+ if (isa<llvm::IntegerType>(varType))
+ return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
+
+ return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
+}
+
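emitArgumentDemotion, together with the isKNRPromoted flag used below, replaces the typesAreCompatible checks that the following hunks delete. The removed comments' own example, written out as a complete K&R C snippet for illustration:

    /* 'x' is declared short but arrives promoted to int under the default
       argument promotions; the prolog truncates it back to short. */
    void a(x) short x; { }
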
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::Function *Fn,
const FunctionArgList &Args) {
@@ -856,13 +890,17 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
assert(FI.arg_size() == Args.size() &&
"Mismatch between function signature & arguments.");
+ unsigned ArgNo = 1;
CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
- for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
- i != e; ++i, ++info_it) {
- const VarDecl *Arg = i->first;
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i, ++info_it, ++ArgNo) {
+ const VarDecl *Arg = *i;
QualType Ty = info_it->type;
const ABIArgInfo &ArgI = info_it->info;
+ bool isPromoted =
+ isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
+
switch (ArgI.getKind()) {
case ABIArgInfo::Indirect: {
llvm::Value *V = AI;
@@ -880,8 +918,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// copy.
const llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
CharUnits Size = getContext().getTypeSizeInChars(Ty);
- Builder.CreateMemCpy(Builder.CreateBitCast(AlignedTemp, I8PtrTy),
- Builder.CreateBitCast(V, I8PtrTy),
+ llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
+ llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
+ Builder.CreateMemCpy(Dst,
+ Src,
llvm::ConstantInt::get(IntPtrTy,
Size.getQuantity()),
ArgI.getIndirectAlign(),
@@ -892,13 +932,11 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// Load scalar value from indirect argument.
CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);
- if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
- // This must be a promotion, for something like
- // "void a(x) short x; {..."
- V = EmitScalarConversion(V, Ty, Arg->getType());
- }
+
+ if (isPromoted)
+ V = emitArgumentDemotion(*this, Arg, V);
}
- EmitParmDecl(*Arg, V);
+ EmitParmDecl(*Arg, V, ArgNo);
break;
}
@@ -914,12 +952,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (Arg->getType().isRestrictQualified())
AI->addAttr(llvm::Attribute::NoAlias);
- if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
- // This must be a promotion, for something like
- // "void a(x) short x; {..."
- V = EmitScalarConversion(V, Ty, Arg->getType());
- }
- EmitParmDecl(*Arg, V);
+ if (isPromoted)
+ V = emitArgumentDemotion(*this, Arg, V);
+
+ EmitParmDecl(*Arg, V, ArgNo);
break;
}
@@ -968,13 +1004,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// Match to what EmitParmDecl is expecting for this type.
if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
- if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
- // This must be a promotion, for something like
- // "void a(x) short x; {..."
- V = EmitScalarConversion(V, Ty, Arg->getType());
- }
+ if (isPromoted)
+ V = emitArgumentDemotion(*this, Arg, V);
}
- EmitParmDecl(*Arg, V);
+ EmitParmDecl(*Arg, V, ArgNo);
continue; // Skip ++AI increment, already done.
}
@@ -985,7 +1018,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
llvm::Function::arg_iterator End =
ExpandTypeFromArgs(Ty, MakeAddrLValue(Temp, Ty), AI);
- EmitParmDecl(*Arg, Temp);
+ EmitParmDecl(*Arg, Temp, ArgNo);
// Name the arguments used in expansion and increment AI.
unsigned Index = 0;
@@ -997,9 +1030,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::Ignore:
// Initialize the local variable appropriately.
if (hasAggregateLLVMType(Ty))
- EmitParmDecl(*Arg, CreateMemTemp(Ty));
+ EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
else
- EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
+ EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
+ ArgNo);
// Skip increment, no matching LLVM parameter.
continue;
@@ -1091,42 +1125,48 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
Ret->setDebugLoc(RetDbgLoc);
}
-RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
+void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
+ const VarDecl *param) {
// StartFunction converted the ABI-lowered parameter(s) into a
// local alloca. We need to turn that into an r-value suitable
// for EmitCall.
- llvm::Value *Local = GetAddrOfLocalVar(Param);
+ llvm::Value *local = GetAddrOfLocalVar(param);
- QualType ArgType = Param->getType();
+ QualType type = param->getType();
// For the most part, we just need to load the alloca, except:
// 1) aggregate r-values are actually pointers to temporaries, and
// 2) references to aggregates are pointers directly to the aggregate.
// I don't know why references to non-aggregates are different here.
- if (const ReferenceType *RefType = ArgType->getAs<ReferenceType>()) {
- if (hasAggregateLLVMType(RefType->getPointeeType()))
- return RValue::getAggregate(Local);
+ if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
+ if (hasAggregateLLVMType(ref->getPointeeType()))
+ return args.add(RValue::getAggregate(local), type);
// Locals which are references to scalars are represented
// with allocas holding the pointer.
- return RValue::get(Builder.CreateLoad(Local));
+ return args.add(RValue::get(Builder.CreateLoad(local)), type);
}
- if (ArgType->isAnyComplexType())
- return RValue::getComplex(LoadComplexFromAddr(Local, /*volatile*/ false));
+ if (type->isAnyComplexType()) {
+ ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
+ return args.add(RValue::getComplex(complex), type);
+ }
- if (hasAggregateLLVMType(ArgType))
- return RValue::getAggregate(Local);
+ if (hasAggregateLLVMType(type))
+ return args.add(RValue::getAggregate(local), type);
- unsigned Alignment = getContext().getDeclAlign(Param).getQuantity();
- return RValue::get(EmitLoadOfScalar(Local, false, Alignment, ArgType));
+ unsigned alignment = getContext().getDeclAlign(param).getQuantity();
+ llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
+ return args.add(RValue::get(value), type);
}
-RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
- if (ArgType->isReferenceType())
- return EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
+void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
+ QualType type) {
+ if (type->isReferenceType())
+ return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
+ type);
- return EmitAnyExprToTemp(E);
+ args.add(EmitAnyExprToTemp(E), type);
}
/// Emits a call or invoke instruction to the given function, depending
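Both helpers above now append into a CallArgList instead of returning an RValue for the caller to pair with a type. A minimal sketch of the new pattern from inside CodeGenFunction (thisPtr, thisTy, argExpr and paramTy are hypothetical):

    CallArgList args;
    args.add(RValue::get(thisPtr), thisTy);  // was: push_back(std::make_pair(rv, ty))
    EmitCallArg(args, argExpr, paramTy);     // the emitter appends directly
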
@@ -1177,18 +1217,18 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
I != E; ++I, ++info_it) {
const ABIArgInfo &ArgInfo = info_it->info;
- RValue RV = I->first;
+ RValue RV = I->RV;
unsigned Alignment =
- getContext().getTypeAlignInChars(I->second).getQuantity();
+ getContext().getTypeAlignInChars(I->Ty).getQuantity();
switch (ArgInfo.getKind()) {
case ABIArgInfo::Indirect: {
if (RV.isScalar() || RV.isComplex()) {
// Make a temporary alloca to pass the argument.
- Args.push_back(CreateMemTemp(I->second));
+ Args.push_back(CreateMemTemp(I->Ty));
if (RV.isScalar())
EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
- Alignment, I->second);
+ Alignment, I->Ty);
else
StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
} else {
@@ -1215,11 +1255,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// FIXME: Avoid the conversion through memory if possible.
llvm::Value *SrcPtr;
if (RV.isScalar()) {
- SrcPtr = CreateMemTemp(I->second, "coerce");
- EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, Alignment,
- I->second);
+ SrcPtr = CreateMemTemp(I->Ty, "coerce");
+ EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, Alignment, I->Ty);
} else if (RV.isComplex()) {
- SrcPtr = CreateMemTemp(I->second, "coerce");
+ SrcPtr = CreateMemTemp(I->Ty, "coerce");
StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
} else
SrcPtr = RV.getAggregateAddr();
@@ -1257,7 +1296,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
case ABIArgInfo::Expand:
- ExpandTypeToArgs(I->second, RV, Args);
+ ExpandTypeToArgs(I->Ty, RV, Args);
break;
}
}
@@ -1275,7 +1314,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (CE->getOpcode() == llvm::Instruction::BitCast &&
ActualFT->getReturnType() == CurFT->getReturnType() &&
ActualFT->getNumParams() == CurFT->getNumParams() &&
- ActualFT->getNumParams() == Args.size()) {
+ ActualFT->getNumParams() == Args.size() &&
+ (CurFT->isVarArg() || !ActualFT->isVarArg())) {
bool ArgsMatch = true;
for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 41e707a..3f600c0 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -44,15 +44,29 @@ namespace clang {
namespace CodeGen {
typedef llvm::SmallVector<llvm::AttributeWithIndex, 8> AttributeListType;
+ struct CallArg {
+ RValue RV;
+ QualType Ty;
+ CallArg(RValue rv, QualType ty)
+ : RV(rv), Ty(ty)
+ { }
+ };
+
/// CallArgList - Type for representing both the value and type of
/// arguments in a call.
- typedef llvm::SmallVector<std::pair<RValue, QualType>, 16> CallArgList;
+ class CallArgList :
+ public llvm::SmallVector<CallArg, 16> {
+ public:
+ void add(RValue rvalue, QualType type) {
+ push_back(CallArg(rvalue, type));
+ }
+ };
/// FunctionArgList - Type for representing both the decl and type
/// of parameters to a function. The decl must be either a
/// ParmVarDecl or ImplicitParamDecl.
- typedef llvm::SmallVector<std::pair<const VarDecl*, QualType>,
- 16> FunctionArgList;
+ class FunctionArgList : public llvm::SmallVector<const VarDecl*, 16> {
+ };
/// CGFunctionInfo - Class to encapsulate the information about a
/// function definition.
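With the parameter type recoverable from the decl itself, the new FunctionArgList above stores bare VarDecl pointers rather than decl/type pairs. The iteration idiom changes accordingly; a sketch:

    for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
         i != e; ++i) {
      const VarDecl *param = *i;         // was: i->first
      QualType ty = param->getType();    // was: i->second
    }
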
@@ -77,6 +91,7 @@ namespace CodeGen {
ArgInfo *Args;
/// How many arguments to pass inreg.
+ bool HasRegParm;
unsigned RegParm;
public:
@@ -84,7 +99,7 @@ namespace CodeGen {
typedef ArgInfo *arg_iterator;
CGFunctionInfo(unsigned CallingConvention, bool NoReturn,
- unsigned RegParm, CanQualType ResTy,
+ bool HasRegParm, unsigned RegParm, CanQualType ResTy,
const CanQualType *ArgTys, unsigned NumArgTys);
~CGFunctionInfo() { delete[] Args; }
@@ -110,6 +125,7 @@ namespace CodeGen {
EffectiveCallingConvention = Value;
}
+ bool getHasRegParm() const { return HasRegParm; }
unsigned getRegParm() const { return RegParm; }
CanQualType getReturnType() const { return Args[0].type; }
@@ -120,6 +136,7 @@ namespace CodeGen {
void Profile(llvm::FoldingSetNodeID &ID) {
ID.AddInteger(getCallingConvention());
ID.AddBoolean(NoReturn);
+ ID.AddBoolean(HasRegParm);
ID.AddInteger(RegParm);
getReturnType().Profile(ID);
for (arg_iterator it = arg_begin(), ie = arg_end(); it != ie; ++it)
@@ -133,6 +150,7 @@ namespace CodeGen {
Iterator end) {
ID.AddInteger(Info.getCC());
ID.AddBoolean(Info.getNoReturn());
+ ID.AddBoolean(Info.getHasRegParm());
ID.AddInteger(Info.getRegParm());
ResTy.Profile(ID);
for (; begin != end; ++begin) {
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index cd28bbe..ca8b657 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -22,12 +22,12 @@
using namespace clang;
using namespace CodeGen;
-static uint64_t
+static CharUnits
ComputeNonVirtualBaseClassOffset(ASTContext &Context,
const CXXRecordDecl *DerivedClass,
CastExpr::path_const_iterator Start,
CastExpr::path_const_iterator End) {
- uint64_t Offset = 0;
+ CharUnits Offset = CharUnits::Zero();
const CXXRecordDecl *RD = DerivedClass;
@@ -42,13 +42,12 @@ ComputeNonVirtualBaseClassOffset(ASTContext &Context,
cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
// Add the offset.
- Offset += Layout.getBaseClassOffsetInBits(BaseDecl);
+ Offset += Layout.getBaseClassOffset(BaseDecl);
RD = BaseDecl;
}
- // FIXME: We should not use / 8 here.
- return Offset / 8;
+ return Offset;
}
llvm::Constant *
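These CGClass.cpp hunks migrate base-class offsets from raw bit counts to CharUnits, which removes the "/ 8" conversions the old FIXME complained about. The idiom, sketched with placeholder names (ctx, layout, baseDecl, bitOff):

    CharUnits offset = CharUnits::Zero();
    offset += layout.getBaseClassOffset(baseDecl);        // already bytes, not bits
    CharUnits fromBits = ctx.toCharUnitsFromBits(bitOff); // explicit when converting
    int64_t raw = offset.getQuantity();                   // plain byte count, no "/ 8"
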
@@ -57,16 +56,16 @@ CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
CastExpr::path_const_iterator PathEnd) {
assert(PathBegin != PathEnd && "Base path should not be empty!");
- uint64_t Offset =
+ CharUnits Offset =
ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
PathBegin, PathEnd);
- if (!Offset)
+ if (Offset.isZero())
return 0;
const llvm::Type *PtrDiffTy =
Types.ConvertType(getContext().getPointerDiffType());
- return llvm::ConstantInt::get(PtrDiffTy, Offset);
+ return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
}
/// Gets the address of a direct base class within a complete object.
@@ -85,20 +84,20 @@ CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
== ConvertType(Derived));
// Compute the offset of the virtual base.
- uint64_t Offset;
+ CharUnits Offset;
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
if (BaseIsVirtual)
- Offset = Layout.getVBaseClassOffsetInBits(Base);
+ Offset = Layout.getVBaseClassOffset(Base);
else
- Offset = Layout.getBaseClassOffsetInBits(Base);
+ Offset = Layout.getBaseClassOffset(Base);
// Shift and cast down to the base type.
// TODO: for complete types, this should be possible with a GEP.
llvm::Value *V = This;
- if (Offset) {
+ if (Offset.isPositive()) {
const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
V = Builder.CreateBitCast(V, Int8PtrTy);
- V = Builder.CreateConstInBoundsGEP1_64(V, Offset / 8);
+ V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
}
V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());
@@ -107,13 +106,14 @@ CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
static llvm::Value *
ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ThisPtr,
- uint64_t NonVirtual, llvm::Value *Virtual) {
+ CharUnits NonVirtual, llvm::Value *Virtual) {
const llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
llvm::Value *NonVirtualOffset = 0;
- if (NonVirtual)
- NonVirtualOffset = llvm::ConstantInt::get(PtrDiffTy, NonVirtual);
+ if (!NonVirtual.isZero())
+ NonVirtualOffset = llvm::ConstantInt::get(PtrDiffTy,
+ NonVirtual.getQuantity());
llvm::Value *BaseOffset;
if (Virtual) {
@@ -150,7 +150,7 @@ CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
++Start;
}
- uint64_t NonVirtualOffset =
+ CharUnits NonVirtualOffset =
ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
Start, PathEnd);
@@ -158,7 +158,7 @@ CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
const llvm::Type *BasePtrTy =
ConvertType((PathEnd[-1])->getType())->getPointerTo();
- if (!NonVirtualOffset && !VBase) {
+ if (NonVirtualOffset.isZero() && !VBase) {
// Just cast back.
return Builder.CreateBitCast(Value, BasePtrTy);
}
@@ -172,9 +172,7 @@ CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
CastNotNull = createBasicBlock("cast.notnull");
CastEnd = createBasicBlock("cast.end");
- llvm::Value *IsNull =
- Builder.CreateICmpEQ(Value,
- llvm::Constant::getNullValue(Value->getType()));
+ llvm::Value *IsNull = Builder.CreateIsNull(Value);
Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
EmitBlock(CastNotNull);
}
@@ -187,14 +185,15 @@ CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
- uint64_t VBaseOffset = Layout.getVBaseClassOffsetInBits(VBase);
- NonVirtualOffset += VBaseOffset / 8;
+ CharUnits VBaseOffset = Layout.getVBaseClassOffset(VBase);
+ NonVirtualOffset += VBaseOffset;
} else
VirtualOffset = GetVirtualBaseClassOffset(Value, Derived, VBase);
}
// Apply the offsets.
- Value = ApplyNonVirtualAndVirtualOffset(*this, Value, NonVirtualOffset,
+ Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
+ NonVirtualOffset,
VirtualOffset);
// Cast back.
@@ -206,8 +205,7 @@ CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
Builder.CreateBr(CastEnd);
EmitBlock(CastEnd);
- llvm::PHINode *PHI = Builder.CreatePHI(Value->getType());
- PHI->reserveOperandSpace(2);
+ llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
PHI->addIncoming(Value, CastNotNull);
PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
CastNull);
@@ -246,9 +244,7 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
CastNotNull = createBasicBlock("cast.notnull");
CastEnd = createBasicBlock("cast.end");
- llvm::Value *IsNull =
- Builder.CreateICmpEQ(Value,
- llvm::Constant::getNullValue(Value->getType()));
+ llvm::Value *IsNull = Builder.CreateIsNull(Value);
Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
EmitBlock(CastNotNull);
}
@@ -267,8 +263,7 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
Builder.CreateBr(CastEnd);
EmitBlock(CastEnd);
- llvm::PHINode *PHI = Builder.CreatePHI(Value->getType());
- PHI->reserveOperandSpace(2);
+ llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
PHI->addIncoming(Value, CastNotNull);
PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
CastNull);
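Two IRBuilder idioms recur in the hunks above: CreateIsNull folds the hand-rolled compare against a null constant, and CreatePHI now takes the expected number of incoming edges up front, replacing the separate reserveOperandSpace call. Sketch (value and the blocks are hypothetical):

    llvm::Value *isNull = Builder.CreateIsNull(value);
    llvm::PHINode *phi = Builder.CreatePHI(value->getType(), 2);
    phi->addIncoming(value, notNullBB);
    phi->addIncoming(llvm::Constant::getNullValue(value->getType()), nullBB);
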
@@ -304,9 +299,9 @@ static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD,
} else {
const ASTRecordLayout &Layout =
CGF.getContext().getASTRecordLayout(RD);
- uint64_t BaseOffset = ForVirtualBase ?
- Layout.getVBaseClassOffsetInBits(Base) :
- Layout.getBaseClassOffsetInBits(Base);
+ CharUnits BaseOffset = ForVirtualBase ?
+ Layout.getVBaseClassOffset(Base) :
+ Layout.getBaseClassOffset(Base);
SubVTTIndex =
CGF.CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
@@ -407,7 +402,7 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);
- if (CGF.CGM.getLangOptions().areExceptionsEnabled() &&
+ if (CGF.CGM.getLangOptions().Exceptions &&
!BaseClassDecl->hasTrivialDestructor())
CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
isBaseVirtual);
@@ -589,8 +584,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
// we know we're in a copy constructor.
unsigned SrcArgIndex = Args.size() - 1;
llvm::Value *SrcPtr
- = CGF.Builder.CreateLoad(
- CGF.GetAddrOfLocalVar(Args[SrcArgIndex].first));
+ = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
LValue Src = CGF.EmitLValueForFieldInitialization(SrcPtr, Field, 0);
// Copy the aggregate.
@@ -606,7 +600,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
EmitAggMemberInitializer(CGF, LHS, ArrayIndexVar, MemberInit, FieldType, 0);
- if (!CGF.CGM.getLangOptions().areExceptionsEnabled())
+ if (!CGF.CGM.getLangOptions().Exceptions)
return;
// FIXME: If we have an array of classes w/ non-trivial destructors,
@@ -664,6 +658,10 @@ static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {
if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
return false;
+ // FIXME: Decide if we can do a delegation of a delegating constructor.
+ if (Ctor->isDelegatingConstructor())
+ return false;
+
return true;
}
@@ -716,6 +714,9 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
CXXCtorType CtorType,
FunctionArgList &Args) {
+ if (CD->isDelegatingConstructor())
+ return EmitDelegatingCXXConstructorCall(CD, Args);
+
const CXXRecordDecl *ClassDecl = CD->getParent();
llvm::SmallVector<CXXCtorInitializer *, 8> MemberInitializers;
@@ -727,8 +728,10 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
if (Member->isBaseInitializer())
EmitBaseInitializer(*this, ClassDecl, Member, CtorType);
- else
+ else if (Member->isAnyMemberInitializer())
MemberInitializers.push_back(Member);
+ else
+ llvm_unreachable("Delegating initializer on non-delegating constructor");
}
InitializeVTablePointers(ClassDecl);
@@ -1196,15 +1199,14 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
CallArgList Args;
// Push the this ptr.
- Args.push_back(std::make_pair(RValue::get(This),
- D->getThisType(getContext())));
+ Args.add(RValue::get(This), D->getThisType(getContext()));
// Push the src ptr.
QualType QT = *(FPT->arg_type_begin());
const llvm::Type *t = CGM.getTypes().ConvertType(QT);
Src = Builder.CreateBitCast(Src, t);
- Args.push_back(std::make_pair(RValue::get(Src), QT));
+ Args.add(RValue::get(Src), QT);
// Skip over first argument (Src).
++ArgBeg;
@@ -1212,9 +1214,7 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin()+1,
E = FPT->arg_type_end(); I != E; ++I, ++Arg) {
assert(Arg != ArgEnd && "Running over edge of argument list!");
- QualType ArgType = *I;
- Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
- ArgType));
+ EmitCallArg(Args, *Arg, *I);
}
// Either we've emitted all the call args, or we have a call to a
// variadic function.
@@ -1223,8 +1223,7 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
// If we still have any arguments, emit them using the type of the argument.
for (; Arg != ArgEnd; ++Arg) {
QualType ArgType = Arg->getType();
- Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
- ArgType));
+ EmitCallArg(Args, *Arg, ArgType);
}
QualType ResultType = FPT->getResultType();
@@ -1243,30 +1242,26 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
assert(I != E && "no parameters to constructor");
// this
- DelegateArgs.push_back(std::make_pair(RValue::get(LoadCXXThis()),
- I->second));
+ DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType());
++I;
// vtt
if (llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(Ctor, CtorType),
/*ForVirtualBase=*/false)) {
QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
- DelegateArgs.push_back(std::make_pair(RValue::get(VTT), VoidPP));
+ DelegateArgs.add(RValue::get(VTT), VoidPP);
if (CodeGenVTables::needsVTTParameter(CurGD)) {
assert(I != E && "cannot skip vtt parameter, already done with args");
- assert(I->second == VoidPP && "skipping parameter not of vtt type");
+ assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type");
++I;
}
}
// Explicit arguments.
for (; I != E; ++I) {
- const VarDecl *Param = I->first;
- QualType ArgType = Param->getType(); // because we're passing it to itself
- RValue Arg = EmitDelegateCallArg(Param);
-
- DelegateArgs.push_back(std::make_pair(Arg, ArgType));
+ const VarDecl *param = *I;
+ EmitDelegateCallArg(DelegateArgs, param);
}
EmitCall(CGM.getTypes().getFunctionInfo(Ctor, CtorType),
@@ -1274,6 +1269,19 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
ReturnValueSlot(), DelegateArgs, Ctor);
}
+void
+CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
+ const FunctionArgList &Args) {
+ assert(Ctor->isDelegatingConstructor());
+
+ llvm::Value *ThisPtr = LoadCXXThis();
+
+ AggValueSlot AggSlot = AggValueSlot::forAddr(ThisPtr, false, /*Lifetime*/ true);
+
+ EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);
+}
+
+
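EmitDelegatingCXXConstructorCall evaluates the single delegating initializer straight into *this via an AggValueSlot. The C++0x source form it lowers, for illustration:

    struct S {
      S(int n);
      S() : S(42) { }  // delegating: init_begin()[0] is the S(int) call
    };
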
void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
CXXDtorType Type,
bool ForVirtualBase,
@@ -1317,6 +1325,7 @@ void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
if (ClassDecl->hasTrivialDestructor()) return;
const CXXDestructorDecl *D = ClassDecl->getDestructor();
+ assert(D && D->isUsed() && "destructor not marked as used!");
PushDestructorCleanup(D, Addr);
}
@@ -1325,11 +1334,12 @@ CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) {
llvm::Value *VTablePtr = GetVTablePtr(This, Int8PtrTy);
- int64_t VBaseOffsetOffset =
+ CharUnits VBaseOffsetOffset =
CGM.getVTables().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl);
llvm::Value *VBaseOffsetPtr =
- Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset, "vbase.offset.ptr");
+ Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
+ "vbase.offset.ptr");
const llvm::Type *PtrDiffTy =
ConvertType(getContext().getPointerDiffType());
@@ -1344,7 +1354,7 @@ CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
void
CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
const CXXRecordDecl *NearestVBase,
- uint64_t OffsetFromNearestVBase,
+ CharUnits OffsetFromNearestVBase,
llvm::Constant *VTable,
const CXXRecordDecl *VTableClass) {
const CXXRecordDecl *RD = Base.getBase();
@@ -1374,23 +1384,23 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
// Compute where to store the address point.
llvm::Value *VirtualOffset = 0;
- uint64_t NonVirtualOffset = 0;
+ CharUnits NonVirtualOffset = CharUnits::Zero();
if (CodeGenVTables::needsVTTParameter(CurGD) && NearestVBase) {
// We need to use the virtual base offset offset because the virtual base
// might have a different offset in the most derived class.
VirtualOffset = GetVirtualBaseClassOffset(LoadCXXThis(), VTableClass,
NearestVBase);
- NonVirtualOffset = OffsetFromNearestVBase / 8;
+ NonVirtualOffset = OffsetFromNearestVBase;
} else {
// We can just use the base offset in the complete class.
- NonVirtualOffset = Base.getBaseOffset() / 8;
+ NonVirtualOffset = Base.getBaseOffset();
}
// Apply the offsets.
llvm::Value *VTableField = LoadCXXThis();
- if (NonVirtualOffset || VirtualOffset)
+ if (!NonVirtualOffset.isZero() || VirtualOffset)
VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
NonVirtualOffset,
VirtualOffset);
@@ -1405,7 +1415,7 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
void
CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
const CXXRecordDecl *NearestVBase,
- uint64_t OffsetFromNearestVBase,
+ CharUnits OffsetFromNearestVBase,
bool BaseIsNonVirtualPrimaryBase,
llvm::Constant *VTable,
const CXXRecordDecl *VTableClass,
@@ -1430,8 +1440,8 @@ CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
if (!BaseDecl->isDynamicClass())
continue;
- uint64_t BaseOffset;
- uint64_t BaseOffsetFromNearestVBase;
+ CharUnits BaseOffset;
+ CharUnits BaseOffsetFromNearestVBase;
bool BaseDeclIsNonVirtualPrimaryBase;
if (I->isVirtual()) {
@@ -1442,16 +1452,15 @@ CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
const ASTRecordLayout &Layout =
getContext().getASTRecordLayout(VTableClass);
- BaseOffset = Layout.getVBaseClassOffsetInBits(BaseDecl);
- BaseOffsetFromNearestVBase = 0;
+ BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
+ BaseOffsetFromNearestVBase = CharUnits::Zero();
BaseDeclIsNonVirtualPrimaryBase = false;
} else {
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
- BaseOffset =
- Base.getBaseOffset() + Layout.getBaseClassOffsetInBits(BaseDecl);
+ BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
BaseOffsetFromNearestVBase =
- OffsetFromNearestVBase + Layout.getBaseClassOffsetInBits(BaseDecl);
+ OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
}
@@ -1473,8 +1482,9 @@ void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
// Initialize the vtable pointers for this class and all of its bases.
VisitedVirtualBasesSetTy VBases;
- InitializeVTablePointers(BaseSubobject(RD, 0), /*NearestVBase=*/0,
- /*OffsetFromNearestVBase=*/0,
+ InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()),
+ /*NearestVBase=*/0,
+ /*OffsetFromNearestVBase=*/CharUnits::Zero(),
/*BaseIsNonVirtualPrimaryBase=*/false,
VTable, RD, VBases);
}
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index 1d7901a..41ecd81 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -870,6 +870,29 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
}
}
+/// isObviouslyBranchWithoutCleanups - Return true if a branch to the
+/// specified destination obviously has no cleanups to run. 'false' is always
+/// a conservatively correct answer for this method.
+bool CodeGenFunction::isObviouslyBranchWithoutCleanups(JumpDest Dest) const {
+ assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
+ && "stale jump destination");
+
+ // Calculate the innermost active normal cleanup.
+ EHScopeStack::stable_iterator TopCleanup =
+ EHStack.getInnermostActiveNormalCleanup();
+
+ // If we're not in an active normal cleanup scope, or if the
+ // destination scope is within the innermost active normal cleanup
+ // scope, we don't need to worry about fixups.
+ if (TopCleanup == EHStack.stable_end() ||
+ TopCleanup.encloses(Dest.getScopeDepth())) // works for invalid
+ return true;
+
+ // Otherwise, we might need some cleanups.
+ return false;
+}
+
+
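A hypothetical caller, to show the intent of the query added above: a true result lets the emitter use a plain branch, anything else goes through the cleanup machinery:

    if (isObviouslyBranchWithoutCleanups(Dest))
      Builder.CreateBr(Dest.getBlock());   // provably no cleanups in between
    else
      EmitBranchThroughCleanup(Dest);      // may need fixups
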
/// Terminate the current block by emitting a branch which might leave
/// the current cleanup-protected scope. The target scope may not yet
/// be known, in which case this will require a fixup.
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index dfd9f56..f2e1c02 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -119,6 +119,17 @@ llvm::StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
return llvm::StringRef(StrPtr, OS.tell());
}
+/// getSelectorName - Return selector name. This is used for debugging
+/// info.
+llvm::StringRef CGDebugInfo::getSelectorName(Selector S) {
+ llvm::SmallString<256> SName;
+ llvm::raw_svector_ostream OS(SName);
+ OS << S.getAsString();
+ char *StrPtr = DebugInfoNames.Allocate<char>(OS.tell());
+ memcpy(StrPtr, SName.begin(), OS.tell());
+ return llvm::StringRef(StrPtr, OS.tell());
+}
+
/// getClassName - Get class name including template argument list.
llvm::StringRef
CGDebugInfo::getClassName(RecordDecl *RD) {
@@ -306,8 +317,7 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
DBuilder.createMemberType("isa", getOrCreateMainFile(),
0,Size, 0, 0, 0, ISATy);
EltTys.push_back(FieldTy);
- llvm::DIArray Elements =
- DBuilder.getOrCreateArray(EltTys.data(), EltTys.size());
+ llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
return DBuilder.createStructType(TheCU, "objc_object",
getOrCreateMainFile(),
@@ -462,8 +472,8 @@ llvm::DIType CGDebugInfo::CreatePointerLikeType(unsigned Tag,
// Bit size, align and offset of the type.
// Size is always the size of a pointer. We can't use getTypeSize here
// because that does not return the correct value for references.
- uint64_t Size =
- CGM.getContext().Target.getPointerWidth(PointeeTy.getAddressSpace());
+ unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy);
+ uint64_t Size = CGM.getContext().Target.getPointerWidth(AS);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
return
@@ -488,7 +498,7 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
EltTys.push_back(CreateMemberType(Unit, FType, "reserved", &FieldOffset));
EltTys.push_back(CreateMemberType(Unit, FType, "Size", &FieldOffset));
- Elements = DBuilder.getOrCreateArray(EltTys.data(), EltTys.size());
+ Elements = DBuilder.getOrCreateArray(EltTys);
EltTys.clear();
unsigned Flags = llvm::DIDescriptor::FlagAppleBlock;
@@ -522,7 +532,7 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
EltTys.push_back(FieldTy);
FieldOffset += FieldSize;
- Elements = DBuilder.getOrCreateArray(EltTys.data(), EltTys.size());
+ Elements = DBuilder.getOrCreateArray(EltTys);
EltTy = DBuilder.createStructType(Unit, "__block_literal_generic",
Unit, LineNo, FieldOffset, 0,
@@ -564,8 +574,7 @@ llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
EltTys.push_back(getOrCreateType(FTP->getArgType(i), Unit));
}
- llvm::DIArray EltTypeArray =
- DBuilder.getOrCreateArray(EltTys.data(), EltTys.size());
+ llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(EltTys);
llvm::DIType DbgTy = DBuilder.createSubroutineType(Unit, EltTypeArray);
return DbgTy;
@@ -609,18 +618,32 @@ void CGDebugInfo::
CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
llvm::SmallVectorImpl<llvm::Value *> &elements) {
unsigned fieldNo = 0;
+ const FieldDecl *LastFD = 0;
+ bool IsMsStruct = record->hasAttr<MsStructAttr>();
+
const ASTRecordLayout &layout = CGM.getContext().getASTRecordLayout(record);
for (RecordDecl::field_iterator I = record->field_begin(),
E = record->field_end();
I != E; ++I, ++fieldNo) {
FieldDecl *field = *I;
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are ignored
+ if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((field), LastFD) ||
+ CGM.getContext().ZeroBitfieldFollowsBitfield((field), LastFD)) {
+ --fieldNo;
+ continue;
+ }
+ LastFD = field;
+ }
llvm::StringRef name = field->getName();
QualType type = field->getType();
// Ignore unnamed fields unless they're anonymous structs/unions.
- if (name.empty() && !type->isRecordType())
+ if (name.empty() && !type->isRecordType()) {
+ LastFD = field;
continue;
+ }
llvm::DIType fieldType
= createFieldType(name, type, field->getBitWidth(),
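Per the checks above, under ms_struct layout a zero-length bitfield that follows an ordinary member (or another bitfield) does not occupy a field slot, so the debug-info field numbering skips it too. For example:

    struct __attribute__((ms_struct)) S {
      int a;
      int : 0;   // ignored for field numbering under ms_struct
      int b;
    };
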
@@ -655,9 +678,7 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
if (!Method->isStatic())
{
// "this" pointer is always first argument.
- ASTContext &Context = CGM.getContext();
- QualType ThisPtr =
- Context.getPointerType(Context.getTagDeclType(Method->getParent()));
+ QualType ThisPtr = Method->getThisType(CGM.getContext());
llvm::DIType ThisPtrType =
DBuilder.createArtificialType(getOrCreateType(ThisPtr, Unit));
@@ -669,8 +690,7 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
for (unsigned i = 1, e = Args.getNumElements(); i != e; ++i)
Elts.push_back(Args.getElement(i));
- llvm::DIArray EltTypeArray =
- DBuilder.getOrCreateArray(Elts.data(), Elts.size());
+ llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(Elts);
return DBuilder.createSubroutineType(Unit, EltTypeArray);
}
@@ -753,10 +773,7 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
Virtuality, VIndex, ContainingType,
Flags, CGM.getLangOptions().Optimize);
- // Don't cache ctors or dtors since we have to emit multiple functions for
- // a single ctor or dtor.
- if (!IsCtorOrDtor && Method->isThisDeclarationADefinition())
- SPCache[Method] = llvm::WeakVH(SP);
+ SPCache[Method] = llvm::WeakVH(SP);
return SP;
}
@@ -816,10 +833,13 @@ CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
if (BI->isVirtual()) {
// virtual base offset offset is -ve. The code generator emits dwarf
// expression where it expects +ve number.
- BaseOffset = 0 - CGM.getVTables().getVirtualBaseOffsetOffset(RD, Base);
+ BaseOffset =
+ 0 - CGM.getVTables().getVirtualBaseOffsetOffset(RD, Base).getQuantity();
BFlags = llvm::DIDescriptor::FlagVirtual;
} else
BaseOffset = RL.getBaseClassOffsetInBits(Base);
+ // FIXME: Inconsistent units for BaseOffset. It is in bytes when
+ // BI->isVirtual() and bits when not.
AccessSpecifier Access = BI->getAccessSpecifier();
if (Access == clang::AS_private)
@@ -835,6 +855,60 @@ CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
}
}
+/// CollectTemplateParams - A helper function to collect template parameters.
+llvm::DIArray CGDebugInfo::
+CollectTemplateParams(const TemplateParameterList *TPList,
+ const TemplateArgumentList &TAList,
+ llvm::DIFile Unit) {
+ llvm::SmallVector<llvm::Value *, 16> TemplateParams;
+ for (unsigned i = 0, e = TAList.size(); i != e; ++i) {
+ const TemplateArgument &TA = TAList[i];
+ const NamedDecl *ND = TPList->getParam(i);
+ if (TA.getKind() == TemplateArgument::Type) {
+ llvm::DIType TTy = getOrCreateType(TA.getAsType(), Unit);
+ llvm::DITemplateTypeParameter TTP =
+ DBuilder.createTemplateTypeParameter(TheCU, ND->getName(), TTy);
+ TemplateParams.push_back(TTP);
+ } else if (TA.getKind() == TemplateArgument::Integral) {
+ llvm::DIType TTy = getOrCreateType(TA.getIntegralType(), Unit);
+ llvm::DITemplateValueParameter TVP =
+ DBuilder.createTemplateValueParameter(TheCU, ND->getName(), TTy,
+ TA.getAsIntegral()->getZExtValue());
+ TemplateParams.push_back(TVP);
+ }
+ }
+ return DBuilder.getOrCreateArray(TemplateParams);
+}
+
+/// CollectFunctionTemplateParams - A helper function to collect debug
+/// info for function template parameters.
+llvm::DIArray CGDebugInfo::
+CollectFunctionTemplateParams(const FunctionDecl *FD, llvm::DIFile Unit) {
+ if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplateSpecialization){
+ const TemplateParameterList *TList =
+ FD->getTemplateSpecializationInfo()->getTemplate()->getTemplateParameters();
+ return
+ CollectTemplateParams(TList, *FD->getTemplateSpecializationArgs(), Unit);
+ }
+ return llvm::DIArray();
+}
+
+/// CollectCXXTemplateParams - A helper function to collect debug info for
+/// template parameters.
+llvm::DIArray CGDebugInfo::
+CollectCXXTemplateParams(const ClassTemplateSpecializationDecl *TSpecial,
+ llvm::DIFile Unit) {
+ llvm::PointerUnion<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *>
+ PU = TSpecial->getSpecializedTemplateOrPartial();
+
+ TemplateParameterList *TPList = PU.is<ClassTemplateDecl *>() ?
+ PU.get<ClassTemplateDecl *>()->getTemplateParameters() :
+ PU.get<ClassTemplatePartialSpecializationDecl *>()->getTemplateParameters();
+ const TemplateArgumentList &TAList = TSpecial->getTemplateInstantiationArgs();
+ return CollectTemplateParams(TPList, TAList, Unit);
+}
+
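The two wrappers feed CollectTemplateParams from either a function or a class specialization, so specializations get named template parameters in the debug info. An instantiation they would describe, for illustration:

    template <typename T, int N> struct Array { T data[N]; };
    Array<float, 4> a;  // a type parameter (T = float) and an
                        // integral value parameter (N = 4)
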
/// getOrCreateVTablePtrType - Return debug info descriptor for vtable.
llvm::DIType CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile Unit) {
if (VTablePtrType.isValid())
@@ -844,7 +918,7 @@ llvm::DIType CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile Unit) {
/* Function type */
llvm::Value *STy = getOrCreateType(Context.IntTy, Unit);
- llvm::DIArray SElements = DBuilder.getOrCreateArray(&STy, 1);
+ llvm::DIArray SElements = DBuilder.getOrCreateArray(STy);
llvm::DIType SubTy = DBuilder.createSubroutineType(Unit, SElements);
unsigned Size = Context.getTypeSize(Context.VoidPtrTy);
llvm::DIType vtbl_ptr_type = DBuilder.createPointerType(SubTy, Size, 0,
@@ -971,30 +1045,13 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
}
CollectRecordFields(RD, Unit, EltTys);
- llvm::SmallVector<llvm::Value *, 16> TemplateParams;
+ llvm::DIArray TParamsArray;
if (CXXDecl) {
CollectCXXMemberFunctions(CXXDecl, Unit, EltTys, FwdDecl);
CollectCXXFriends(CXXDecl, Unit, EltTys, FwdDecl);
- if (ClassTemplateSpecializationDecl *TSpecial
- = dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
- const TemplateArgumentList &TAL = TSpecial->getTemplateArgs();
- for (unsigned i = 0, e = TAL.size(); i != e; ++i) {
- const TemplateArgument &TA = TAL[i];
- if (TA.getKind() == TemplateArgument::Type) {
- llvm::DIType TTy = getOrCreateType(TA.getAsType(), Unit);
- llvm::DITemplateTypeParameter TTP =
- DBuilder.createTemplateTypeParameter(TheCU, TTy.getName(), TTy);
- TemplateParams.push_back(TTP);
- } else if (TA.getKind() == TemplateArgument::Integral) {
- llvm::DIType TTy = getOrCreateType(TA.getIntegralType(), Unit);
- // FIXME: Get parameter name, instead of parameter type name.
- llvm::DITemplateValueParameter TVP =
- DBuilder.createTemplateValueParameter(TheCU, TTy.getName(), TTy,
- TA.getAsIntegral()->getZExtValue());
- TemplateParams.push_back(TVP);
- }
- }
- }
+ if (const ClassTemplateSpecializationDecl *TSpecial
+ = dyn_cast<ClassTemplateSpecializationDecl>(RD))
+ TParamsArray = CollectCXXTemplateParams(TSpecial, Unit);
}
RegionStack.pop_back();
@@ -1008,18 +1065,13 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
llvm::StringRef RDName = RD->getName();
uint64_t Size = CGM.getContext().getTypeSize(Ty);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
- llvm::DIArray Elements =
- DBuilder.getOrCreateArray(EltTys.data(), EltTys.size());
+ llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
llvm::MDNode *RealDecl = NULL;
- if (RD->isStruct())
- RealDecl = DBuilder.createStructType(RDContext, RDName, DefUnit, Line,
- Size, Align, 0, Elements);
- else if (RD->isUnion())
+ if (RD->isUnion())
RealDecl = DBuilder.createUnionType(RDContext, RDName, DefUnit, Line,
- Size, Align, 0, Elements);
- else {
- assert(RD->isClass() && "Unknown RecordType!");
+ Size, Align, 0, Elements);
+ else if (CXXDecl) {
RDName = getClassName(RD);
// A class's primary base or the class itself contains the vtable.
llvm::MDNode *ContainingType = NULL;
@@ -1039,13 +1091,14 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
}
else if (CXXDecl->isDynamicClass())
ContainingType = FwdDecl;
- llvm::DIArray TParamsArray =
- DBuilder.getOrCreateArray(TemplateParams.data(), TemplateParams.size());
+
RealDecl = DBuilder.createClassType(RDContext, RDName, DefUnit, Line,
Size, Align, 0, 0, llvm::DIType(),
Elements, ContainingType,
TParamsArray);
- }
+ } else
+ RealDecl = DBuilder.createStructType(RDContext, RDName, DefUnit, Line,
+ Size, Align, 0, Elements);
// Now that we have a real decl for the struct, replace anything using the
// old decl with the new one. This will recursively update the debug info.
@@ -1156,14 +1209,26 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
else if (Field->getAccessControl() == ObjCIvarDecl::Private)
Flags = llvm::DIDescriptor::FlagPrivate;
- FieldTy = DBuilder.createMemberType(FieldName, FieldDefUnit,
- FieldLine, FieldSize, FieldAlign,
- FieldOffset, Flags, FieldTy);
+ llvm::StringRef PropertyName;
+ llvm::StringRef PropertyGetter;
+ llvm::StringRef PropertySetter;
+ unsigned PropertyAttributes = 0;
+ if (ObjCPropertyDecl *PD =
+ ID->FindPropertyVisibleInPrimaryClass(Field->getIdentifier())) {
+ PropertyName = PD->getName();
+ PropertyGetter = getSelectorName(PD->getGetterName());
+ PropertySetter = getSelectorName(PD->getSetterName());
+ PropertyAttributes = PD->getPropertyAttributes();
+ }
+ FieldTy = DBuilder.createObjCIVar(FieldName, FieldDefUnit,
+ FieldLine, FieldSize, FieldAlign,
+ FieldOffset, Flags, FieldTy,
+ PropertyName, PropertyGetter,
+ PropertySetter, PropertyAttributes);
EltTys.push_back(FieldTy);
}
- llvm::DIArray Elements =
- DBuilder.getOrCreateArray(EltTys.data(), EltTys.size());
+ llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
RegionStack.pop_back();
llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator RI =
@@ -1200,12 +1265,17 @@ llvm::DIType CGDebugInfo::CreateType(const TagType *Ty) {
llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty,
llvm::DIFile Unit) {
llvm::DIType ElementTy = getOrCreateType(Ty->getElementType(), Unit);
- uint64_t NumElems = Ty->getNumElements();
- if (NumElems > 0)
+ int64_t NumElems = Ty->getNumElements();
+ int64_t LowerBound = 0;
+ if (NumElems == 0)
+ // If the number of elements is not known then this is an unbounded array.
+ // Use Low = 1, Hi = 0 to express such arrays.
+ LowerBound = 1;
+ else
--NumElems;
- llvm::Value *Subscript = DBuilder.getOrCreateSubrange(0, NumElems);
- llvm::DIArray SubscriptArray = DBuilder.getOrCreateArray(&Subscript, 1);
+ llvm::Value *Subscript = DBuilder.getOrCreateSubrange(LowerBound, NumElems);
+ llvm::DIArray SubscriptArray = DBuilder.getOrCreateArray(Subscript);
uint64_t Size = CGM.getContext().getTypeSize(Ty);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
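Both the vector hunk above and the array hunk below adopt the DWARF convention of an inverted subrange, lower bound 1 and upper bound 0, for arrays whose length is unknown. For example:

    int fixed[4];          // subrange [0, 3]
    extern int unsized[];  // unbounded: emitted as Lo = 1, Hi = 0
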
@@ -1228,6 +1298,9 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
} else if (Ty->isIncompleteArrayType()) {
Size = 0;
Align = CGM.getContext().getTypeAlign(Ty->getElementType());
+ } else if (Ty->isDependentSizedArrayType() || Ty->isIncompleteType()) {
+ Size = 0;
+ Align = 0;
} else {
// Size and align of the whole array, not the element type.
Size = CGM.getContext().getTypeSize(Ty);
@@ -1243,18 +1316,23 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
EltTy = Ty->getElementType();
else {
while ((Ty = dyn_cast<ArrayType>(EltTy))) {
- uint64_t Upper = 0;
- if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
+ int64_t UpperBound = 0;
+ int64_t LowerBound = 0;
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty)) {
if (CAT->getSize().getZExtValue())
- Upper = CAT->getSize().getZExtValue() - 1;
+ UpperBound = CAT->getSize().getZExtValue() - 1;
+ } else
+ // This is an unbounded array. Use Low = 1, Hi = 0 to express such
+ // arrays.
+ LowerBound = 1;
+
// FIXME: Verify this is right for VLAs.
- Subscripts.push_back(DBuilder.getOrCreateSubrange(0, Upper));
+ Subscripts.push_back(DBuilder.getOrCreateSubrange(LowerBound, UpperBound));
EltTy = Ty->getElementType();
}
}
- llvm::DIArray SubscriptArray =
- DBuilder.getOrCreateArray(Subscripts.data(), Subscripts.size());
+ llvm::DIArray SubscriptArray = DBuilder.getOrCreateArray(Subscripts);
llvm::DIType DbgTy =
DBuilder.createArrayType(Size, Align, getOrCreateType(EltTy, Unit),
@@ -1303,9 +1381,7 @@ llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
Info.first, Info.second, FieldOffset, 0,
PointerDiffDITy);
- llvm::DIArray Elements =
- DBuilder.getOrCreateArray(&ElementTypes[0],
- llvm::array_lengthof(ElementTypes));
+ llvm::DIArray Elements = DBuilder.getOrCreateArray(ElementTypes);
return DBuilder.createStructType(U, llvm::StringRef("test"),
U, 0, FieldOffset,
@@ -1327,8 +1403,7 @@ llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED) {
}
// Return a CompositeType for the enum itself.
- llvm::DIArray EltArray =
- DBuilder.getOrCreateArray(Enumerators.data(), Enumerators.size());
+ llvm::DIArray EltArray = DBuilder.getOrCreateArray(Enumerators);
llvm::DIFile DefUnit = getOrCreateFile(ED->getLocation());
unsigned Line = getLineNumber(ED->getLocation());
@@ -1366,6 +1441,7 @@ static QualType UnwrapTypeForDebugInfo(QualType T) {
break;
case Type::Attributed:
T = cast<AttributedType>(T)->getEquivalentType();
+ break;
case Type::Elaborated:
T = cast<ElaboratedType>(T)->getNamedType();
break;
@@ -1375,6 +1451,9 @@ static QualType UnwrapTypeForDebugInfo(QualType T) {
case Type::SubstTemplateTypeParm:
T = cast<SubstTemplateTypeParmType>(T)->getReplacementType();
break;
+ case Type::Auto:
+ T = cast<AutoType>(T)->getDeducedType();
+ break;
}
assert(T != LastT && "Type unwrapping failed to unwrap!");
@@ -1394,7 +1473,7 @@ llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty,
// Unwrap the type as needed for debug information.
Ty = UnwrapTypeForDebugInfo(Ty);
-
+
// Check for existing entry.
llvm::DenseMap<void *, llvm::WeakVH>::iterator it =
TypeCache.find(Ty.getAsOpaquePtr());
@@ -1502,6 +1581,37 @@ llvm::DIType CGDebugInfo::CreateMemberType(llvm::DIFile Unit, QualType FType,
return Ty;
}
+/// getFunctionDeclaration - Return debug info descriptor to describe method
+/// declaration for the given method definition.
+llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD) return llvm::DISubprogram();
+
+ // Setup context.
+ getContextDescriptor(cast<Decl>(D->getDeclContext()));
+
+ llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
+ MI = SPCache.find(FD);
+ if (MI != SPCache.end()) {
+ llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(&*MI->second));
+ if (SP.isSubprogram() && !llvm::DISubprogram(SP).isDefinition())
+ return SP;
+ }
+
+ for (FunctionDecl::redecl_iterator I = FD->redecls_begin(),
+ E = FD->redecls_end(); I != E; ++I) {
+ const FunctionDecl *NextFD = *I;
+ llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
+ MI = SPCache.find(NextFD);
+ if (MI != SPCache.end()) {
+ llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(&*MI->second));
+ if (SP.isSubprogram() && !llvm::DISubprogram(SP).isDefinition())
+ return SP;
+ }
+ }
+ return llvm::DISubprogram();
+}
+
/// EmitFunctionStart - Constructs the debug code for entering a function -
/// "llvm.dbg.func.start.".
void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
@@ -1514,9 +1624,11 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
FnBeginRegionCount.push_back(RegionStack.size());
const Decl *D = GD.getDecl();
+
unsigned Flags = 0;
llvm::DIFile Unit = getOrCreateFile(CurLoc);
llvm::DIDescriptor FDContext(Unit);
+ llvm::DIArray TParamsArray;
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// If there is a DISubprogram for this function available then use it.
llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
@@ -1540,6 +1652,9 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
if (const NamespaceDecl *NSDecl =
dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
FDContext = getOrCreateNameSpace(NSDecl);
+
+ // Collect template parameters.
+ TParamsArray = CollectFunctionTemplateParams(FD, Unit);
} else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D)) {
Name = getObjCMethodName(OMD);
Flags |= llvm::DIDescriptor::FlagPrototyped;
@@ -1557,11 +1672,14 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
unsigned LineNo = getLineNumber(CurLoc);
if (D->isImplicit())
Flags |= llvm::DIDescriptor::FlagArtificial;
+ llvm::DIType SPTy = getOrCreateType(FnType, Unit);
+ llvm::DISubprogram SPDecl = getFunctionDeclaration(D);
llvm::DISubprogram SP =
DBuilder.createFunction(FDContext, Name, LinkageName, Unit,
- LineNo, getOrCreateType(FnType, Unit),
+ LineNo, SPTy,
Fn->hasInternalLinkage(), true/*definition*/,
- Flags, CGM.getLangOptions().Optimize, Fn);
+ Flags, CGM.getLangOptions().Optimize, Fn,
+ TParamsArray, SPDecl);
// Push function on region stack.
llvm::MDNode *SPN = SP;
@@ -1714,15 +1832,17 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
}
CharUnits Align = CGM.getContext().getDeclAlign(VD);
- if (Align > CharUnits::fromQuantity(
- CGM.getContext().Target.getPointerAlign(0) / 8)) {
- unsigned AlignedOffsetInBytes
- = llvm::RoundUpToAlignment(FieldOffset/8, Align.getQuantity());
- unsigned NumPaddingBytes
- = AlignedOffsetInBytes - FieldOffset/8;
+ if (Align > CGM.getContext().toCharUnitsFromBits(
+ CGM.getContext().Target.getPointerAlign(0))) {
+ CharUnits FieldOffsetInBytes
+ = CGM.getContext().toCharUnitsFromBits(FieldOffset);
+ CharUnits AlignedOffsetInBytes
+ = FieldOffsetInBytes.RoundUpToAlignment(Align);
+ CharUnits NumPaddingBytes
+ = AlignedOffsetInBytes - FieldOffsetInBytes;
- if (NumPaddingBytes > 0) {
- llvm::APInt pad(32, NumPaddingBytes);
+ if (NumPaddingBytes.isPositive()) {
+ llvm::APInt pad(32, NumPaddingBytes.getQuantity());
FType = CGM.getContext().getConstantArrayType(CGM.getContext().CharTy,
pad, ArrayType::Normal, 0);
EltTys.push_back(CreateMemberType(Unit, FType, "", &FieldOffset));
@@ -1732,7 +1852,7 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
FType = Type;
llvm::DIType FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
FieldSize = CGM.getContext().getTypeSize(FType);
- FieldAlign = Align.getQuantity()*8;
+ FieldAlign = CGM.getContext().toBits(Align);
*XOffset = FieldOffset;
FieldTy = DBuilder.createMemberType(VD->getName(), Unit,
@@ -1741,8 +1861,7 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
EltTys.push_back(FieldTy);
FieldOffset += FieldSize;
- llvm::DIArray Elements =
- DBuilder.getOrCreateArray(EltTys.data(), EltTys.size());
+ llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
unsigned Flags = llvm::DIDescriptor::FlagBlockByrefStruct;
@@ -1752,7 +1871,8 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
/// EmitDeclare - Emit local variable declaration debug info.
void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
- llvm::Value *Storage, CGBuilderTy &Builder) {
+ llvm::Value *Storage,
+ unsigned ArgNo, CGBuilderTy &Builder) {
assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
@@ -1798,13 +1918,13 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
const llvm::Type *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
// offset of __forwarding field
- offset =
- CharUnits::fromQuantity(CGM.getContext().Target.getPointerWidth(0)/8);
+ offset = CGM.getContext().toCharUnitsFromBits(
+ CGM.getContext().Target.getPointerWidth(0));
addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
// offset of x field
- offset = CharUnits::fromQuantity(XOffset/8);
+ offset = CGM.getContext().toCharUnitsFromBits(XOffset);
addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
// Create the descriptor for the variable.
@@ -1812,7 +1932,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
DBuilder.createComplexVariable(Tag,
llvm::DIDescriptor(RegionStack.back()),
VD->getName(), Unit, Line, Ty,
- addr.data(), addr.size());
+ addr, ArgNo);
// Insert an llvm.dbg.declare into the current block.
llvm::Instruction *Call =
@@ -1825,7 +1945,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
llvm::DIVariable D =
DBuilder.createLocalVariable(Tag, llvm::DIDescriptor(Scope),
Name, Unit, Line, Ty,
- CGM.getLangOptions().Optimize, Flags);
+ CGM.getLangOptions().Optimize, Flags, ArgNo);
// Insert an llvm.dbg.declare into the current block.
llvm::Instruction *Call =
@@ -1855,7 +1975,8 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
llvm::DIVariable D =
DBuilder.createLocalVariable(Tag, llvm::DIDescriptor(Scope),
FieldName, Unit, Line, FieldTy,
- CGM.getLangOptions().Optimize, Flags);
+ CGM.getLangOptions().Optimize, Flags,
+ ArgNo);
// Insert an llvm.dbg.declare into the current block.
llvm::Instruction *Call =
@@ -1867,17 +1988,22 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
}
}
-/// EmitDeclare - Emit local variable declaration debug info.
-void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
- llvm::Value *Storage, CGBuilderTy &Builder,
- const CGBlockInfo &blockInfo) {
- assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
+ llvm::Value *Storage,
+ CGBuilderTy &Builder) {
+ EmitDeclare(VD, llvm::dwarf::DW_TAG_auto_variable, Storage, 0, Builder);
+}
+void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
+ const VarDecl *VD, llvm::Value *Storage, CGBuilderTy &Builder,
+ const CGBlockInfo &blockInfo) {
+ assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+
if (Builder.GetInsertBlock() == 0)
return;
-
+
bool isByRef = VD->hasAttr<BlocksAttr>();
-
+
uint64_t XOffset = 0;
llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
llvm::DIType Ty;
@@ -1904,46 +2030,34 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
// offset of __forwarding field
- offset = CharUnits::fromQuantity(target.getPointerSize()/8);
+ offset = CGM.getContext().toCharUnitsFromBits(target.getPointerSizeInBits());
addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
// offset of x field
- offset = CharUnits::fromQuantity(XOffset/8);
+ offset = CGM.getContext().toCharUnitsFromBits(XOffset);
addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
}
// Create the descriptor for the variable.
llvm::DIVariable D =
- DBuilder.createComplexVariable(Tag, llvm::DIDescriptor(RegionStack.back()),
- VD->getName(), Unit, Line, Ty,
- addr.data(), addr.size());
+ DBuilder.createComplexVariable(llvm::dwarf::DW_TAG_auto_variable,
+ llvm::DIDescriptor(RegionStack.back()),
+ VD->getName(), Unit, Line, Ty, addr);
// Insert an llvm.dbg.declare into the current block.
llvm::Instruction *Call =
- DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
+ DBuilder.insertDeclare(Storage, D, Builder.GetInsertPoint());
llvm::MDNode *Scope = RegionStack.back();
Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
}
-void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
- llvm::Value *Storage,
- CGBuilderTy &Builder) {
- EmitDeclare(VD, llvm::dwarf::DW_TAG_auto_variable, Storage, Builder);
-}
-
-void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
- const VarDecl *variable, llvm::Value *Storage, CGBuilderTy &Builder,
- const CGBlockInfo &blockInfo) {
- EmitDeclare(variable, llvm::dwarf::DW_TAG_auto_variable, Storage, Builder,
- blockInfo);
-}
-
/// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
/// variable declaration.
void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
+ unsigned ArgNo,
CGBuilderTy &Builder) {
- EmitDeclare(VD, llvm::dwarf::DW_TAG_arg_variable, AI, Builder);
+ EmitDeclare(VD, llvm::dwarf::DW_TAG_arg_variable, AI, ArgNo, Builder);
}
namespace {
@@ -1969,8 +2083,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
unsigned column = getColumnNumber(loc);
// Build the debug-info type for the block literal.
- llvm::DIDescriptor enclosingContext =
- getContextDescriptor(cast<Decl>(blockDecl->getDeclContext()));
+ getContextDescriptor(cast<Decl>(blockDecl->getDeclContext()));
const llvm::StructLayout *blockLayout =
CGM.getTargetData().getStructLayout(block.StructureType);
@@ -2048,18 +2161,31 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
}
const VarDecl *variable = capture->getVariable();
- QualType type = (capture->isByRef() ? C.VoidPtrTy : variable->getType());
llvm::StringRef name = variable->getName();
- fields.push_back(createFieldType(name, type, 0, loc, AS_public,
- offsetInBits, tunit));
+
+ llvm::DIType fieldType;
+ if (capture->isByRef()) {
+ std::pair<uint64_t,unsigned> ptrInfo = C.getTypeInfo(C.VoidPtrTy);
+
+ // FIXME: this creates a second copy of this type!
+ uint64_t xoffset;
+ fieldType = EmitTypeForVarWithBlocksAttr(variable, &xoffset);
+ fieldType = DBuilder.createPointerType(fieldType, ptrInfo.first);
+ fieldType = DBuilder.createMemberType(name, tunit, line,
+ ptrInfo.first, ptrInfo.second,
+ offsetInBits, 0, fieldType);
+ } else {
+ fieldType = createFieldType(name, variable->getType(), 0,
+ loc, AS_public, offsetInBits, tunit);
+ }
+ fields.push_back(fieldType);
}
llvm::SmallString<36> typeName;
llvm::raw_svector_ostream(typeName)
<< "__block_literal_" << CGM.getUniqueBlockCount();
- llvm::DIArray fieldsArray =
- DBuilder.getOrCreateArray(fields.data(), fields.size());
+ llvm::DIArray fieldsArray = DBuilder.getOrCreateArray(fields);
llvm::DIType type =
DBuilder.createStructType(tunit, typeName.str(), tunit, line,
@@ -2078,7 +2204,8 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
DBuilder.createLocalVariable(llvm::dwarf::DW_TAG_arg_variable,
llvm::DIDescriptor(scope),
name, tunit, line, type,
- CGM.getLangOptions().Optimize, flags);
+ CGM.getLangOptions().Optimize, flags,
+ cast<llvm::Argument>(addr)->getArgNo() + 1);
// Insert an llvm.dbg.value into the current block.
llvm::Instruction *declare =
@@ -2185,3 +2312,17 @@ CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl) {
NameSpaceCache[NSDecl] = llvm::WeakVH(NS);
return NS;
}
+
+/// UpdateCompletedType - Update type cache because the type is now
+/// translated.
+void CGDebugInfo::UpdateCompletedType(const TagDecl *TD) {
+ QualType Ty = CGM.getContext().getTagDeclType(TD);
+
+ // If the type exists in the type cache, remove it from the cache.
+ // There is no need to prepare debug info for the completed type
+ // right now. It will be generated on demand lazily.
+ llvm::DenseMap<void *, llvm::WeakVH>::iterator it =
+ TypeCache.find(Ty.getAsOpaquePtr());
+ if (it != TypeCache.end())
+ TypeCache.erase(it);
+}
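The hunks above that build complex-address expressions for __block variables push the op stream OpDeref, OpPlus <forwarding offset>, OpDeref, OpPlus <x offset>, which tells the debugger to chase the byref structure's __forwarding pointer before applying the field offset (the byte offsets now come from toCharUnitsFromBits instead of hand-dividing bit counts by 8). Read mechanically as pointer arithmetic, a sketch with hypothetical names:

    #include <cstddef>
    // Each OpDeref is a pointer load; each OpPlus adds the CharUnits
    // offset pushed immediately after it.
    char *locate_x(char **storage, std::size_t forwardingOffset,
                   std::size_t xOffset) {
      char *byref = *storage;                                   // OpDeref
      char *home =
          *reinterpret_cast<char **>(byref + forwardingOffset); // OpPlus, OpDeref
      return home + xOffset;                                    // OpPlus: address of 'x'
    }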
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index a390788..27d991b 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -32,6 +32,7 @@ namespace llvm {
namespace clang {
class VarDecl;
class ObjCInterfaceDecl;
+ class ClassTemplateSpecializationDecl;
namespace CodeGen {
class CodeGenModule;
@@ -122,6 +123,16 @@ class CGDebugInfo {
llvm::DIFile F,
llvm::SmallVectorImpl<llvm::Value *> &EltTys,
llvm::DIType RecordTy);
+
+ llvm::DIArray
+ CollectTemplateParams(const TemplateParameterList *TPList,
+ const TemplateArgumentList &TAList,
+ llvm::DIFile Unit);
+ llvm::DIArray
+ CollectFunctionTemplateParams(const FunctionDecl *FD, llvm::DIFile Unit);
+ llvm::DIArray
+ CollectCXXTemplateParams(const ClassTemplateSpecializationDecl *TS,
+ llvm::DIFile F);
llvm::DIType createFieldType(llvm::StringRef name, QualType type,
Expr *bitWidth, SourceLocation loc,
@@ -158,6 +169,10 @@ public:
/// has introduced scope change.
void UpdateLineDirectiveRegion(CGBuilderTy &Builder);
+ /// UpdateCompletedType - Update type cache because the type is now
+ /// translated.
+ void UpdateCompletedType(const TagDecl *TD);
+
/// EmitRegionStart - Emit a call to llvm.dbg.region.start to indicate start
/// of a new block.
void EmitRegionStart(CGBuilderTy &Builder);
@@ -181,7 +196,7 @@ public:
/// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
/// variable declaration.
void EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI,
- CGBuilderTy &Builder);
+ unsigned ArgNo, CGBuilderTy &Builder);
/// EmitDeclareOfBlockLiteralArgVariable - Emit call to
/// llvm.dbg.declare for the block-literal argument to a block
@@ -204,12 +219,7 @@ public:
private:
/// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
void EmitDeclare(const VarDecl *decl, unsigned Tag, llvm::Value *AI,
- CGBuilderTy &Builder);
-
- /// EmitDeclare - Emit call to llvm.dbg.declare for a variable
- /// declaration from an enclosing block.
- void EmitDeclare(const VarDecl *decl, unsigned Tag, llvm::Value *AI,
- CGBuilderTy &Builder, const CGBlockInfo &blockInfo);
+ unsigned ArgNo, CGBuilderTy &Builder);
// EmitTypeForVarWithBlocksAttr - Build up structure info for the byref.
// See BuildByRefType.
@@ -243,6 +253,10 @@ private:
llvm::DIType CreateMemberType(llvm::DIFile Unit, QualType FType,
llvm::StringRef Name, uint64_t *Offset);
+ /// getFunctionDeclaration - Return debug info descriptor to describe method
+ /// declaration for the given method definition.
+ llvm::DISubprogram getFunctionDeclaration(const Decl *D);
+
/// getFunctionName - Get function name for the given FunctionDecl. If the
/// name is constructed on demand (e.g. C++ destructor) then the name
/// is stored on the side.
@@ -252,6 +266,10 @@ private:
/// This is the display name for the debugging info.
llvm::StringRef getObjCMethodName(const ObjCMethodDecl *FD);
+ /// getSelectorName - Return selector name. This is used for debugging
+ /// info.
+ llvm::StringRef getSelectorName(Selector S);
+
/// getClassName - Get class name including template argument list.
llvm::StringRef getClassName(RecordDecl *RD);
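The CollectTemplateParams / CollectFunctionTemplateParams / CollectCXXTemplateParams declarations added above let the debug-info builder attach template-argument descriptors to specializations instead of leaving them opaque behind the class name. A small illustration of what can now be described:

    // For this specialization the debug info can record T = double and
    // N = 8 as template parameters, not just the name "Ring<double, 8>".
    template <typename T, int N>
    struct Ring { T slots[N]; };

    Ring<double, 8> r;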
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index f4db01d..c027375 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -14,7 +14,6 @@
#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
-#include "CGBlocks.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
@@ -70,7 +69,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::Friend:
case Decl::FriendTemplate:
case Decl::Block:
- assert(0 && "Declaration not should not be in declstmts!");
+ assert(0 && "Declaration should not be in declstmts!");
case Decl::Function: // void X();
case Decl::Record: // struct/union/class X;
case Decl::Enum: // enum X;
@@ -92,8 +91,9 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
return EmitVarDecl(VD);
}
- case Decl::Typedef: { // typedef int X;
- const TypedefDecl &TD = cast<TypedefDecl>(D);
+ case Decl::Typedef: // typedef int X;
+ case Decl::TypeAlias: { // using X = int; [C++0x]
+ const TypedefNameDecl &TD = cast<TypedefNameDecl>(D);
QualType Ty = TD.getUnderlyingType();
if (Ty->isVariablyModifiedType())
@@ -179,7 +179,8 @@ CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
new llvm::GlobalVariable(CGM.getModule(), LTy,
Ty.isConstant(getContext()), Linkage,
CGM.EmitNullConstant(D.getType()), Name, 0,
- D.isThreadSpecified(), Ty.getAddressSpace());
+ D.isThreadSpecified(),
+ CGM.getContext().getTargetAddressSpace(Ty));
GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
if (Linkage != llvm::GlobalValue::InternalLinkage)
GV->setVisibility(CurFn->getVisibility());
@@ -222,7 +223,7 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
OldGV->getLinkage(), Init, "",
/*InsertBefore*/ OldGV,
D.isThreadSpecified(),
- D.getType().getAddressSpace());
+ CGM.getContext().getTargetAddressSpace(D.getType()));
GV->setVisibility(OldGV->getVisibility());
// Steal the name of the old global
@@ -289,7 +290,8 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
// FIXME: It is really dangerous to store this in the map; if anyone
// RAUW's the GV, uses of this constant will be invalid.
const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
- const llvm::Type *LPtrTy = LTy->getPointerTo(D.getType().getAddressSpace());
+ const llvm::Type *LPtrTy =
+ LTy->getPointerTo(CGM.getContext().getTargetAddressSpace(D.getType()));
DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);
// Emit global variable debug descriptor for static vars.
@@ -300,114 +302,6 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
}
}
-unsigned CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const {
- assert(ByRefValueInfo.count(VD) && "Did not find value!");
-
- return ByRefValueInfo.find(VD)->second.second;
-}
-
-llvm::Value *CodeGenFunction::BuildBlockByrefAddress(llvm::Value *BaseAddr,
- const VarDecl *V) {
- llvm::Value *Loc = Builder.CreateStructGEP(BaseAddr, 1, "forwarding");
- Loc = Builder.CreateLoad(Loc);
- Loc = Builder.CreateStructGEP(Loc, getByRefValueLLVMField(V),
- V->getNameAsString());
- return Loc;
-}
-
-/// BuildByRefType - This routine changes a __block variable declared as T x
-/// into:
-///
-/// struct {
-/// void *__isa;
-/// void *__forwarding;
-/// int32_t __flags;
-/// int32_t __size;
-/// void *__copy_helper; // only if needed
-/// void *__destroy_helper; // only if needed
-/// char padding[X]; // only if needed
-/// T x;
-/// } x
-///
-const llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
- std::pair<const llvm::Type *, unsigned> &Info = ByRefValueInfo[D];
- if (Info.first)
- return Info.first;
-
- QualType Ty = D->getType();
-
- std::vector<const llvm::Type *> Types;
-
- llvm::PATypeHolder ByRefTypeHolder = llvm::OpaqueType::get(getLLVMContext());
-
- // void *__isa;
- Types.push_back(Int8PtrTy);
-
- // void *__forwarding;
- Types.push_back(llvm::PointerType::getUnqual(ByRefTypeHolder));
-
- // int32_t __flags;
- Types.push_back(Int32Ty);
-
- // int32_t __size;
- Types.push_back(Int32Ty);
-
- bool HasCopyAndDispose = getContext().BlockRequiresCopying(Ty);
- if (HasCopyAndDispose) {
- /// void *__copy_helper;
- Types.push_back(Int8PtrTy);
-
- /// void *__destroy_helper;
- Types.push_back(Int8PtrTy);
- }
-
- bool Packed = false;
- CharUnits Align = getContext().getDeclAlign(D);
- if (Align > getContext().toCharUnitsFromBits(Target.getPointerAlign(0))) {
- // We have to insert padding.
-
- // The struct above has 2 32-bit integers.
- unsigned CurrentOffsetInBytes = 4 * 2;
-
- // And either 2 or 4 pointers.
- CurrentOffsetInBytes += (HasCopyAndDispose ? 4 : 2) *
- CGM.getTargetData().getTypeAllocSize(Int8PtrTy);
-
- // Align the offset.
- unsigned AlignedOffsetInBytes =
- llvm::RoundUpToAlignment(CurrentOffsetInBytes, Align.getQuantity());
-
- unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes;
- if (NumPaddingBytes > 0) {
- const llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext());
- // FIXME: We need a sema error for alignment larger than the minimum of
- // the maximal stack alignment and the alignment of malloc on the system.
- if (NumPaddingBytes > 1)
- Ty = llvm::ArrayType::get(Ty, NumPaddingBytes);
-
- Types.push_back(Ty);
-
- // We want a packed struct.
- Packed = true;
- }
- }
-
- // T x;
- Types.push_back(ConvertTypeForMem(Ty));
-
- const llvm::Type *T = llvm::StructType::get(getLLVMContext(), Types, Packed);
-
- cast<llvm::OpaqueType>(ByRefTypeHolder.get())->refineAbstractTypeTo(T);
- CGM.getModule().addTypeName("struct.__block_byref_" + D->getNameAsString(),
- ByRefTypeHolder.get());
-
- Info.first = ByRefTypeHolder.get();
-
- Info.second = Types.size() - 1;
-
- return Info.first;
-}
-
namespace {
struct CallArrayDtor : EHScopeStack::Cleanup {
CallArrayDtor(const CXXDestructorDecl *Dtor,
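The BuildByRefType helper removed above (it moves to the blocks code, per the dropped CGBlocks.h include) carried the authoritative comment on the byref layout; restated schematically, with the field roles the byref-initialization hunk later in this file fills in:

    #include <cstdint>
    // Schematic of the per-variable "struct.__block_byref_<name>"; T stands
    // for the declared type of the __block variable.
    template <class T> struct BlockByref {
      void *isa;               // __isa: 0, or 1 when the field is GC-weak
      BlockByref *forwarding;  // __forwarding: current home, stack or heap
      int32_t flags;           // __flags: BLOCK_HAS_COPY_DISPOSE if helpers exist
      int32_t size;            // __size: allocation size for the runtime
      void *copy_helper;       // present only if copy/dispose is needed
      void *destroy_helper;    // present only if copy/dispose is needed
      // char padding[X];      // present only for over-aligned variables
      T value;                 // the variable itself
    };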
@@ -498,20 +392,11 @@ namespace {
CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));
CallArgList Args;
- Args.push_back(std::make_pair(RValue::get(Arg),
- CGF.getContext().getPointerType(Var.getType())));
+ Args.add(RValue::get(Arg),
+ CGF.getContext().getPointerType(Var.getType()));
CGF.EmitCall(FnInfo, CleanupFn, ReturnValueSlot(), Args);
}
};
-
- struct CallBlockRelease : EHScopeStack::Cleanup {
- llvm::Value *Addr;
- CallBlockRelease(llvm::Value *Addr) : Addr(Addr) {}
-
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
- CGF.BuildBlockRelease(Addr, BLOCK_FIELD_IS_BYREF);
- }
- };
}
@@ -642,7 +527,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// candidate nor a __block variable, emit it as a global instead.
if (CGM.getCodeGenOpts().MergeAllConstants && Ty.isConstQualified() &&
!NRVO && !isByRef) {
- EmitStaticVarDecl(D, llvm::GlobalValue::PrivateLinkage);
+ EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
emission.Address = 0; // signal this condition to later callbacks
assert(emission.wasEmittedAsGlobal());
@@ -724,7 +609,8 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// Get the element type.
const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
- const llvm::Type *LElemPtrTy = LElemTy->getPointerTo(Ty.getAddressSpace());
+ const llvm::Type *LElemPtrTy =
+ LElemTy->getPointerTo(CGM.getContext().getTargetAddressSpace(Ty));
llvm::Value *VLASize = EmitVLASize(Ty);
@@ -800,75 +686,14 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
EnsureInsertPoint();
}
- CharUnits alignment = emission.Alignment;
-
- if (emission.IsByRef) {
- llvm::Value *V;
-
- BlockFieldFlags fieldFlags;
- bool fieldNeedsCopyDispose = false;
-
- if (type->isBlockPointerType()) {
- fieldFlags |= BLOCK_FIELD_IS_BLOCK;
- fieldNeedsCopyDispose = true;
- } else if (getContext().isObjCNSObjectType(type) ||
- type->isObjCObjectPointerType()) {
- fieldFlags |= BLOCK_FIELD_IS_OBJECT;
- fieldNeedsCopyDispose = true;
- } else if (getLangOptions().CPlusPlus) {
- if (getContext().getBlockVarCopyInits(&D))
- fieldNeedsCopyDispose = true;
- else if (const CXXRecordDecl *record = type->getAsCXXRecordDecl())
- fieldNeedsCopyDispose = !record->hasTrivialDestructor();
- }
-
- llvm::Value *addr = emission.Address;
-
- // FIXME: Someone double check this.
- if (type.isObjCGCWeak())
- fieldFlags |= BLOCK_FIELD_IS_WEAK;
-
- // Initialize the 'isa', which is just 0 or 1.
- int isa = 0;
- if (fieldFlags & BLOCK_FIELD_IS_WEAK)
- isa = 1;
- V = Builder.CreateIntToPtr(Builder.getInt32(isa), Int8PtrTy, "isa");
- Builder.CreateStore(V, Builder.CreateStructGEP(addr, 0, "byref.isa"));
-
- // Store the address of the variable into its own forwarding pointer.
- Builder.CreateStore(addr,
- Builder.CreateStructGEP(addr, 1, "byref.forwarding"));
-
- // Blocks ABI:
- // c) the flags field is set to either 0 if no helper functions are
- // needed or BLOCK_HAS_COPY_DISPOSE if they are,
- BlockFlags flags;
- if (fieldNeedsCopyDispose) flags |= BLOCK_HAS_COPY_DISPOSE;
- Builder.CreateStore(llvm::ConstantInt::get(IntTy, flags.getBitMask()),
- Builder.CreateStructGEP(addr, 2, "byref.flags"));
-
- const llvm::Type *V1;
- V1 = cast<llvm::PointerType>(addr->getType())->getElementType();
- V = llvm::ConstantInt::get(IntTy, CGM.GetTargetTypeStoreSize(V1).getQuantity());
- Builder.CreateStore(V, Builder.CreateStructGEP(addr, 3, "byref.size"));
-
- if (fieldNeedsCopyDispose) {
- llvm::Value *copy_helper = Builder.CreateStructGEP(addr, 4);
- Builder.CreateStore(CGM.BuildbyrefCopyHelper(addr->getType(), fieldFlags,
- alignment.getQuantity(), &D),
- copy_helper);
-
- llvm::Value *destroy_helper = Builder.CreateStructGEP(addr, 5);
- Builder.CreateStore(CGM.BuildbyrefDestroyHelper(addr->getType(),
- fieldFlags,
- alignment.getQuantity(),
- &D),
- destroy_helper);
- }
- }
+ // Initialize the structure of a __block variable.
+ if (emission.IsByRef)
+ emitByrefStructureInit(emission);
if (!Init) return;
+ CharUnits alignment = emission.Alignment;
+
// Check whether this is a byref variable that's potentially
// captured and moved by its own initializer. If so, we'll need to
// emit the initializer first, then copy into the variable.
@@ -877,67 +702,91 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
llvm::Value *Loc =
capturedByInit ? emission.Address : emission.getObjectAddress(*this);
- bool isVolatile = type.isVolatileQualified();
-
+ if (!emission.IsConstantAggregate)
+ return EmitExprAsInit(Init, &D, Loc, alignment, capturedByInit);
+
// If this is a simple aggregate initialization, we can optimize it
// in various ways.
- if (emission.IsConstantAggregate) {
- assert(!capturedByInit && "constant init contains a capturing block?");
-
- llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), type, this);
- assert(Init != 0 && "Wasn't a simple constant init?");
-
- llvm::Value *SizeVal =
- llvm::ConstantInt::get(IntPtrTy,
- getContext().getTypeSizeInChars(type).getQuantity());
-
- const llvm::Type *BP = Int8PtrTy;
- if (Loc->getType() != BP)
- Loc = Builder.CreateBitCast(Loc, BP, "tmp");
-
- // If the initializer is all or mostly zeros, codegen with memset then do
- // a few stores afterward.
- if (shouldUseMemSetPlusStoresToInitialize(Init,
- CGM.getTargetData().getTypeAllocSize(Init->getType()))) {
- Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
- alignment.getQuantity(), isVolatile);
- if (!Init->isNullValue()) {
- Loc = Builder.CreateBitCast(Loc, Init->getType()->getPointerTo());
- emitStoresForInitAfterMemset(Init, Loc, isVolatile, Builder);
- }
- } else {
- // Otherwise, create a temporary global with the initializer then
- // memcpy from the global to the alloca.
- std::string Name = GetStaticDeclName(*this, D, ".");
- llvm::GlobalVariable *GV =
- new llvm::GlobalVariable(CGM.getModule(), Init->getType(), true,
- llvm::GlobalValue::InternalLinkage,
- Init, Name, 0, false, 0);
- GV->setAlignment(alignment.getQuantity());
-
- llvm::Value *SrcPtr = GV;
- if (SrcPtr->getType() != BP)
- SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
+ assert(!capturedByInit && "constant init contains a capturing block?");
+
+ bool isVolatile = type.isVolatileQualified();
- Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, alignment.getQuantity(),
- isVolatile);
+ llvm::Constant *constant = CGM.EmitConstantExpr(D.getInit(), type, this);
+ assert(constant != 0 && "Wasn't a simple constant init?");
+
+ llvm::Value *SizeVal =
+ llvm::ConstantInt::get(IntPtrTy,
+ getContext().getTypeSizeInChars(type).getQuantity());
+
+ const llvm::Type *BP = Int8PtrTy;
+ if (Loc->getType() != BP)
+ Loc = Builder.CreateBitCast(Loc, BP, "tmp");
+
+ // If the initializer is all or mostly zeros, codegen with memset then do
+ // a few stores afterward.
+ if (shouldUseMemSetPlusStoresToInitialize(constant,
+ CGM.getTargetData().getTypeAllocSize(constant->getType()))) {
+ Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
+ alignment.getQuantity(), isVolatile);
+ if (!constant->isNullValue()) {
+ Loc = Builder.CreateBitCast(Loc, constant->getType()->getPointerTo());
+ emitStoresForInitAfterMemset(constant, Loc, isVolatile, Builder);
}
- } else if (type->isReferenceType()) {
- RValue RV = EmitReferenceBindingToExpr(Init, &D);
- if (capturedByInit) Loc = BuildBlockByrefAddress(Loc, &D);
- EmitStoreOfScalar(RV.getScalarVal(), Loc, false, alignment.getQuantity(),
- type);
+ } else {
+ // Otherwise, create a temporary global with the initializer then
+ // memcpy from the global to the alloca.
+ std::string Name = GetStaticDeclName(*this, D, ".");
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), constant->getType(), true,
+ llvm::GlobalValue::InternalLinkage,
+ constant, Name, 0, false, 0);
+ GV->setAlignment(alignment.getQuantity());
+
+ llvm::Value *SrcPtr = GV;
+ if (SrcPtr->getType() != BP)
+ SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
+
+ Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, alignment.getQuantity(),
+ isVolatile);
+ }
+}
+
+/// Emit an expression as an initializer for a variable at the given
+/// location. The expression is not necessarily the normal
+/// initializer for the variable, and the address is not necessarily
+/// its normal location.
+///
+/// \param init the initializing expression
+/// \param var the variable to act as if we're initializing
+/// \param loc the address to initialize; its type is a pointer
+/// to the LLVM mapping of the variable's type
+/// \param alignment the alignment of the address
+/// \param capturedByInit true if the variable is a __block variable
+/// whose address is potentially changed by the initializer
+void CodeGenFunction::EmitExprAsInit(const Expr *init,
+ const VarDecl *var,
+ llvm::Value *loc,
+ CharUnits alignment,
+ bool capturedByInit) {
+ QualType type = var->getType();
+ bool isVolatile = type.isVolatileQualified();
+
+ if (type->isReferenceType()) {
+ RValue RV = EmitReferenceBindingToExpr(init, var);
+ if (capturedByInit) loc = BuildBlockByrefAddress(loc, var);
+ EmitStoreOfScalar(RV.getScalarVal(), loc, false,
+ alignment.getQuantity(), type);
} else if (!hasAggregateLLVMType(type)) {
- llvm::Value *V = EmitScalarExpr(Init);
- if (capturedByInit) Loc = BuildBlockByrefAddress(Loc, &D);
- EmitStoreOfScalar(V, Loc, isVolatile, alignment.getQuantity(), type);
+ llvm::Value *V = EmitScalarExpr(init);
+ if (capturedByInit) loc = BuildBlockByrefAddress(loc, var);
+ EmitStoreOfScalar(V, loc, isVolatile, alignment.getQuantity(), type);
} else if (type->isAnyComplexType()) {
- ComplexPairTy complex = EmitComplexExpr(Init);
- if (capturedByInit) Loc = BuildBlockByrefAddress(Loc, &D);
- StoreComplexToAddr(complex, Loc, isVolatile);
+ ComplexPairTy complex = EmitComplexExpr(init);
+ if (capturedByInit) loc = BuildBlockByrefAddress(loc, var);
+ StoreComplexToAddr(complex, loc, isVolatile);
} else {
// TODO: how can we delay here if D is captured by its initializer?
- EmitAggExpr(Init, AggValueSlot::forAddr(Loc, isVolatile, true, false));
+ EmitAggExpr(init, AggValueSlot::forAddr(loc, isVolatile, true, false));
}
}
@@ -993,14 +842,14 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
// If this is a block variable, call _Block_object_destroy
// (on the unforwarded address).
- if (emission.IsByRef &&
- CGM.getLangOptions().getGCMode() != LangOptions::GCOnly)
- EHStack.pushCleanup<CallBlockRelease>(NormalAndEHCleanup, emission.Address);
+ if (emission.IsByRef)
+ enterByrefCleanup(emission);
}
/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
-void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
+void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
+ unsigned ArgNo) {
// FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
"Invalid argument to EmitParmDecl");
@@ -1046,6 +895,6 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
// Emit debug info for param declaration.
if (CGDebugInfo *DI = getDebugInfo()) {
DI->setLocation(D.getLocation());
- DI->EmitDeclareOfArgVariable(&D, DeclPtr, Builder);
+ DI->EmitDeclareOfArgVariable(&D, DeclPtr, ArgNo, Builder);
}
}
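To make the restructured constant-init path in EmitAutoVarInit concrete: when shouldUseMemSetPlusStoresToInitialize accepts the constant, the variable is zeroed with a single memset and only the non-zero pieces are stored afterward; otherwise the initializer becomes an internal constant global that is memcpy'd into the alloca. A source-level sketch of the first shape (the exact output depends on target and flags):

    // Mostly-zero aggregate: lowered roughly to
    //   memset(buf, 0, sizeof(buf)); buf[3] = 7;
    // instead of a memcpy from a 256-byte constant global.
    void consume(const int *p);
    void demo() {
      int buf[64] = {0, 0, 0, 7};
      consume(buf);
    }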
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index e295267..45b0b96 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -138,6 +138,8 @@ CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
llvm::Constant *AtExitFn = CGM.CreateRuntimeFunction(AtExitFnTy,
"__cxa_atexit");
+ if (llvm::Function *Fn = dyn_cast<llvm::Function>(AtExitFn))
+ Fn->setDoesNotThrow();
llvm::Constant *Handle = CGM.CreateRuntimeVariable(Int8PtrTy,
"__dso_handle");
@@ -149,6 +151,14 @@ CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
llvm::GlobalVariable *DeclPtr) {
+ // If we've been asked to forbid guard variables, emit an error now.
+ // This diagnostic is hard-coded for Darwin's use case; we can find
+ // better phrasing if someone else needs it.
+ if (CGM.getCodeGenOpts().ForbidGuardVariables)
+ CGM.Error(D.getLocation(),
+ "this initialization requires a guard variable, which "
+ "the kernel does not support");
+
CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr);
}
@@ -166,7 +176,7 @@ CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
Fn->setSection(Section);
}
- if (!CGM.getLangOptions().areExceptionsEnabled())
+ if (!CGM.getLangOptions().Exceptions)
Fn->setDoesNotThrow();
return Fn;
@@ -260,12 +270,16 @@ void CodeGenModule::EmitCXXGlobalDtorFunc() {
void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
const VarDecl *D,
llvm::GlobalVariable *Addr) {
- StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FunctionArgList(),
- SourceLocation());
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
+ getTypes().getNullaryFunctionInfo(),
+ FunctionArgList(), SourceLocation());
// Use guarded initialization if the global variable is weak due to
- // being a class template's static data member.
- if (Addr->hasWeakLinkage() && D->getInstantiatedFromStaticDataMember()) {
+ // being a class template's static data member. These will always
+ // have weak_odr linkage.
+ if (Addr->getLinkage() == llvm::GlobalValue::WeakODRLinkage &&
+ D->isStaticDataMember() &&
+ D->getInstantiatedFromStaticDataMember()) {
EmitCXXGuardedInit(*D, Addr);
} else {
EmitCXXGlobalVarDeclInit(*D, Addr);
@@ -277,8 +291,9 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
llvm::Constant **Decls,
unsigned NumDecls) {
- StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FunctionArgList(),
- SourceLocation());
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
+ getTypes().getNullaryFunctionInfo(),
+ FunctionArgList(), SourceLocation());
for (unsigned i = 0; i != NumDecls; ++i)
if (Decls[i])
@@ -290,8 +305,9 @@ void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
void CodeGenFunction::GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
const std::vector<std::pair<llvm::WeakVH, llvm::Constant*> >
&DtorsAndObjects) {
- StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FunctionArgList(),
- SourceLocation());
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
+ getTypes().getNullaryFunctionInfo(),
+ FunctionArgList(), SourceLocation());
// Emit the dtors, in reverse order from construction.
for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) {
@@ -313,21 +329,19 @@ llvm::Function *
CodeGenFunction::GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
const ArrayType *Array,
llvm::Value *This) {
- FunctionArgList Args;
- ImplicitParamDecl *Dst =
- ImplicitParamDecl::Create(getContext(), 0,
- SourceLocation(), 0,
- getContext().getPointerType(getContext().VoidTy));
- Args.push_back(std::make_pair(Dst, Dst->getType()));
+ FunctionArgList args;
+ ImplicitParamDecl dst(0, SourceLocation(), 0, getContext().VoidPtrTy);
+ args.push_back(&dst);
const CGFunctionInfo &FI =
- CGM.getTypes().getFunctionInfo(getContext().VoidTy, Args,
+ CGM.getTypes().getFunctionInfo(getContext().VoidTy, args,
FunctionType::ExtInfo());
const llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false);
llvm::Function *Fn =
CreateGlobalInitOrDestructFunction(CGM, FTy, "__cxx_global_array_dtor");
- StartFunction(GlobalDecl(), getContext().VoidTy, Fn, Args, SourceLocation());
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FI, args,
+ SourceLocation());
QualType BaseElementTy = getContext().getBaseElementType(Array);
const llvm::Type *BasePtr = ConvertType(BaseElementTy)->getPointerTo();
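For context on the __cxa_atexit hunk above: the Itanium C++ ABI entry point that EmitCXXGlobalDtorRegistration binds has the C signature below, and marking it nothrow is sound because it only records the destructor and its __dso_handle key for later invocation:

    // Itanium C++ ABI: run f(p) at exit, or when the shared object
    // identified by d (its __dso_handle) is unloaded.
    extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);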
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 4bce081..6cb9599 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -28,24 +28,22 @@ using namespace CodeGen;
static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) {
// void *__cxa_allocate_exception(size_t thrown_size);
- const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
- std::vector<const llvm::Type*> Args(1, SizeTy);
+ const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getInt8PtrTy(CGF.getLLVMContext()),
- Args, false);
+ llvm::FunctionType::get(llvm::Type::getInt8PtrTy(CGF.getLLVMContext()),
+ SizeTy, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
}
static llvm::Constant *getFreeExceptionFn(CodeGenFunction &CGF) {
// void __cxa_free_exception(void *thrown_exception);
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
- Args, false);
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
+ Int8PtrTy, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_free_exception");
}
@@ -55,11 +53,10 @@ static llvm::Constant *getThrowFn(CodeGenFunction &CGF) {
// void (*dest) (void *));
const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- std::vector<const llvm::Type*> Args(3, Int8PtrTy);
-
+ const llvm::Type *Args[3] = { Int8PtrTy, Int8PtrTy, Int8PtrTy };
const llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
- Args, false);
+ Args, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
}
@@ -68,18 +65,18 @@ static llvm::Constant *getReThrowFn(CodeGenFunction &CGF) {
// void __cxa_rethrow();
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), false);
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
+ /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
}
static llvm::Constant *getGetExceptionPtrFn(CodeGenFunction &CGF) {
// void *__cxa_get_exception_ptr(void*);
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(Int8PtrTy, Args, false);
+ llvm::FunctionType::get(Int8PtrTy, Int8PtrTy, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
}
@@ -88,10 +85,8 @@ static llvm::Constant *getBeginCatchFn(CodeGenFunction &CGF) {
// void *__cxa_begin_catch(void*);
const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- std::vector<const llvm::Type*> Args(1, Int8PtrTy);
-
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(Int8PtrTy, Args, false);
+ llvm::FunctionType::get(Int8PtrTy, Int8PtrTy, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
}
@@ -100,7 +95,8 @@ static llvm::Constant *getEndCatchFn(CodeGenFunction &CGF) {
// void __cxa_end_catch();
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), false);
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
+ /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
}
@@ -109,22 +105,18 @@ static llvm::Constant *getUnexpectedFn(CodeGenFunction &CGF) {
// void __cxa_call_unexpected(void *thrown_exception);
const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- std::vector<const llvm::Type*> Args(1, Int8PtrTy);
-
const llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
- Args, false);
+ Int8PtrTy, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_call_unexpected");
}
llvm::Constant *CodeGenFunction::getUnwindResumeOrRethrowFn() {
const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
- std::vector<const llvm::Type*> Args(1, Int8PtrTy);
-
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), Args,
- false);
+ llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), Int8PtrTy,
+ /*IsVarArgs=*/false);
if (CGM.getLangOptions().SjLjExceptions)
return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume_or_Rethrow");
@@ -135,7 +127,8 @@ static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
// void __terminate();
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), false);
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
+ /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy,
CGF.CGM.getLangOptions().CPlusPlus ? "_ZSt9terminatev" : "abort");
@@ -145,10 +138,9 @@ static llvm::Constant *getCatchallRethrowFn(CodeGenFunction &CGF,
llvm::StringRef Name) {
const llvm::Type *Int8PtrTy =
llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- std::vector<const llvm::Type*> Args(1, Int8PtrTy);
-
const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
- const llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, Args, false);
+ const llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, Int8PtrTy,
+ /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, Name);
}
@@ -160,6 +152,7 @@ const EHPersonality EHPersonality::GNU_CPlusPlus("__gxx_personality_v0");
const EHPersonality EHPersonality::GNU_CPlusPlus_SJLJ("__gxx_personality_sj0");
const EHPersonality EHPersonality::GNU_ObjC("__gnu_objc_personality_v0",
"objc_exception_throw");
+const EHPersonality EHPersonality::GNU_ObjCXX("__gnustep_objcxx_personality_v0");
static const EHPersonality &getCPersonality(const LangOptions &L) {
if (L.SjLjExceptions)
@@ -201,7 +194,7 @@ static const EHPersonality &getObjCXXPersonality(const LangOptions &L) {
// The GNU runtime's personality function inherently doesn't support
// mixed EH. Use the C++ personality just to avoid returning null.
- return getCXXPersonality(L);
+ return EHPersonality::GNU_ObjCXX;
}
const EHPersonality &EHPersonality::get(const LangOptions &L) {
@@ -273,7 +266,7 @@ static bool PersonalityHasOnlyCXXUses(llvm::Constant *Fn) {
/// when it really needs it.
void CodeGenModule::SimplifyPersonality() {
// For now, this is really a Darwin-specific operation.
- if (Context.Target.getTriple().getOS() != llvm::Triple::Darwin)
+ if (!Context.Target.getTriple().isOSDarwin())
return;
// If we're not in ObjC++ -fexceptions, there's nothing to do.
@@ -439,7 +432,7 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
}
void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
- if (!CGM.getLangOptions().areExceptionsEnabled())
+ if (!CGM.getLangOptions().CXXExceptions)
return;
const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
@@ -449,25 +442,28 @@ void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
if (Proto == 0)
return;
- assert(!Proto->hasAnyExceptionSpec() && "function with parameter pack");
-
- if (!Proto->hasExceptionSpec())
- return;
-
- unsigned NumExceptions = Proto->getNumExceptions();
- EHFilterScope *Filter = EHStack.pushFilter(NumExceptions);
-
- for (unsigned I = 0; I != NumExceptions; ++I) {
- QualType Ty = Proto->getExceptionType(I);
- QualType ExceptType = Ty.getNonReferenceType().getUnqualifiedType();
- llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType,
- /*ForEH=*/true);
- Filter->setFilter(I, EHType);
+ ExceptionSpecificationType EST = Proto->getExceptionSpecType();
+ if (isNoexceptExceptionSpec(EST)) {
+ if (Proto->getNoexceptSpec(getContext()) == FunctionProtoType::NR_Nothrow) {
+ // noexcept functions are simple terminate scopes.
+ EHStack.pushTerminate();
+ }
+ } else if (EST == EST_Dynamic || EST == EST_DynamicNone) {
+ unsigned NumExceptions = Proto->getNumExceptions();
+ EHFilterScope *Filter = EHStack.pushFilter(NumExceptions);
+
+ for (unsigned I = 0; I != NumExceptions; ++I) {
+ QualType Ty = Proto->getExceptionType(I);
+ QualType ExceptType = Ty.getNonReferenceType().getUnqualifiedType();
+ llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType,
+ /*ForEH=*/true);
+ Filter->setFilter(I, EHType);
+ }
}
}
void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
- if (!CGM.getLangOptions().areExceptionsEnabled())
+ if (!CGM.getLangOptions().CXXExceptions)
return;
const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
@@ -477,10 +473,14 @@ void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
if (Proto == 0)
return;
- if (!Proto->hasExceptionSpec())
- return;
-
- EHStack.popFilter();
+ ExceptionSpecificationType EST = Proto->getExceptionSpecType();
+ if (isNoexceptExceptionSpec(EST)) {
+ if (Proto->getNoexceptSpec(getContext()) == FunctionProtoType::NR_Nothrow) {
+ EHStack.popTerminate();
+ }
+ } else if (EST == EST_Dynamic || EST == EST_DynamicNone) {
+ EHStack.popFilter();
+ }
}
void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) {
@@ -541,7 +541,7 @@ llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
assert(EHStack.requiresLandingPad());
assert(!EHStack.empty());
- if (!CGM.getLangOptions().areExceptionsEnabled())
+ if (!CGM.getLangOptions().Exceptions)
return 0;
// Check the innermost scope for a cached landing pad. If this is
@@ -664,7 +664,7 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
assert(I.next() == EHStack.end() && "EH filter is not end of EH stack");
assert(!CatchAll.isValid() && "EH filter reached after catch-all");
- // Filter scopes get added to the selector in wierd ways.
+ // Filter scopes get added to the selector in weird ways.
EHFilterScope &Filter = cast<EHFilterScope>(*I);
HasEHFilter = true;
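The EmitStartEHSpec/EmitEndEHSpec rewrite above distinguishes the C++0x exception-specification kinds: a proven-nothrow noexcept function is wrapped in a terminate scope, while the dynamic forms still push an EH filter of the permitted RTTI descriptors. Illustrative declarations:

    struct A {};
    struct B {};
    void f() noexcept;    // NR_Nothrow: body gets a terminate scope
    void g() throw(A, B); // EST_Dynamic: landing pad filters on {A, B}
    void h() throw();     // EST_DynamicNone: empty filter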
diff --git a/lib/CodeGen/CGException.h b/lib/CodeGen/CGException.h
index 1f9b896..5a743b5 100644
--- a/lib/CodeGen/CGException.h
+++ b/lib/CodeGen/CGException.h
@@ -41,6 +41,7 @@ public:
static const EHPersonality GNU_C;
static const EHPersonality GNU_C_SJLJ;
static const EHPersonality GNU_ObjC;
+ static const EHPersonality GNU_ObjCXX;
static const EHPersonality NeXT_ObjC;
static const EHPersonality GNU_CPlusPlus;
static const EHPersonality GNU_CPlusPlus_SJLJ;
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 2abaadf..bc2cd35 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -15,6 +15,7 @@
#include "CodeGenModule.h"
#include "CGCall.h"
#include "CGCXXABI.h"
+#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
@@ -215,24 +216,28 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
InitializedDecl);
}
+ if (const ObjCPropertyRefExpr *PRE =
+ dyn_cast<ObjCPropertyRefExpr>(E->IgnoreParenImpCasts()))
+ if (PRE->getGetterResultType()->isReferenceType())
+ E = PRE;
+
RValue RV;
if (E->isGLValue()) {
// Emit the expression as an lvalue.
LValue LV = CGF.EmitLValue(E);
+ if (LV.isPropertyRef()) {
+ RV = CGF.EmitLoadOfPropertyRefLValue(LV);
+ return RV.getScalarVal();
+ }
if (LV.isSimple())
return LV.getAddress();
// We have to load the lvalue.
RV = CGF.EmitLoadOfLValue(LV, E->getType());
} else {
- QualType ResultTy = E->getType();
-
llvm::SmallVector<SubobjectAdjustment, 2> Adjustments;
while (true) {
- if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
- E = PE->getSubExpr();
- continue;
- }
+ E = E->IgnoreParens();
if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
if ((CE->getCastKind() == CK_DerivedToBase ||
@@ -327,9 +332,8 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
}
}
-
- const llvm::Type *ResultPtrTy = CGF.ConvertType(ResultTy)->getPointerTo();
- return CGF.Builder.CreateBitCast(Object, ResultPtrTy, "temp");
+
+ return Object;
}
}
@@ -536,6 +540,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::DeclRefExprClass:
return EmitDeclRefLValue(cast<DeclRefExpr>(E));
case Expr::ParenExprClass:return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
+ case Expr::GenericSelectionExprClass:
+ return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
case Expr::PredefinedExprClass:
return EmitPredefinedLValue(cast<PredefinedExpr>(E));
case Expr::StringLiteralClass:
@@ -718,21 +724,22 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
// Offset by the byte offset, if used.
- if (AI.FieldByteOffset) {
+ if (!AI.FieldByteOffset.isZero()) {
Ptr = EmitCastToVoidPtr(Ptr);
- Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset,"bf.field.offs");
+ Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
+ "bf.field.offs");
}
// Cast to the access type.
const llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(),
AI.AccessWidth,
- ExprType.getAddressSpace());
+ CGM.getContext().getTargetAddressSpace(ExprType));
Ptr = Builder.CreateBitCast(Ptr, PTy);
// Perform the load.
llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
- if (AI.AccessAlignment)
- Load->setAlignment(AI.AccessAlignment);
+ if (!AI.AccessAlignment.isZero())
+ Load->setAlignment(AI.AccessAlignment.getQuantity());
// Shift out unused low bits and mask out unused high bits.
llvm::Value *Val = Load;
@@ -921,9 +928,10 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
// Offset by the byte offset, if used.
- if (AI.FieldByteOffset) {
+ if (!AI.FieldByteOffset.isZero()) {
Ptr = EmitCastToVoidPtr(Ptr);
- Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset,"bf.field.offs");
+ Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
+ "bf.field.offs");
}
// Cast to the access type.
@@ -954,8 +962,8 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// If necessary, load and OR in bits that are outside of the bit-field.
if (AI.TargetBitWidth != AI.AccessWidth) {
llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
- if (AI.AccessAlignment)
- Load->setAlignment(AI.AccessAlignment);
+ if (!AI.AccessAlignment.isZero())
+ Load->setAlignment(AI.AccessAlignment.getQuantity());
// Compute the mask for zeroing the bits that are part of the bit-field.
llvm::APInt InvMask =
@@ -969,8 +977,8 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Write the value.
llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
Dst.isVolatileQualified());
- if (AI.AccessAlignment)
- Store->setAlignment(AI.AccessAlignment);
+ if (!AI.AccessAlignment.isZero())
+ Store->setAlignment(AI.AccessAlignment.getQuantity());
}
}
@@ -1090,6 +1098,12 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
}
return;
}
+
+ if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
+ return;
+ }
+
if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
return;
@@ -1415,6 +1429,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// We know that the pointer points to a type of the correct size, unless the
// size is a VLA or Objective-C interface.
llvm::Value *Address = 0;
+ unsigned ArrayAlignment = 0;
if (const VariableArrayType *VAT =
getContext().getAsVariableArrayType(E->getType())) {
llvm::Value *VLASize = GetVLASize(VAT);
@@ -1425,7 +1440,10 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
llvm::Value *Base = EmitScalarExpr(E->getBase());
Address = EmitCastToVoidPtr(Base);
- Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
+ if (getContext().getLangOptions().isSignedOverflowDefined())
+ Address = Builder.CreateGEP(Address, Idx, "arrayidx");
+ else
+ Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
Address = Builder.CreateBitCast(Address, Base->getType());
} else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
// Indexing over an interface, as in "NSString *P; P[4];"
@@ -1447,22 +1465,38 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// "gep x, i" here. Emit one "gep A, 0, i".
assert(Array->getType()->isArrayType() &&
"Array to pointer decay must have array source type!");
- llvm::Value *ArrayPtr = EmitLValue(Array).getAddress();
+ LValue ArrayLV = EmitLValue(Array);
+ llvm::Value *ArrayPtr = ArrayLV.getAddress();
llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
llvm::Value *Args[] = { Zero, Idx };
- Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, Args+2, "arrayidx");
+ // Propagate the alignment from the array itself to the result.
+ ArrayAlignment = ArrayLV.getAlignment();
+
+ if (getContext().getLangOptions().isSignedOverflowDefined())
+ Address = Builder.CreateGEP(ArrayPtr, Args, Args+2, "arrayidx");
+ else
+ Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, Args+2, "arrayidx");
} else {
// The base must be a pointer, which is not an aggregate. Emit it.
llvm::Value *Base = EmitScalarExpr(E->getBase());
- Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
+ if (getContext().getLangOptions().isSignedOverflowDefined())
+ Address = Builder.CreateGEP(Base, Idx, "arrayidx");
+ else
+ Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
}
QualType T = E->getBase()->getType()->getPointeeType();
assert(!T.isNull() &&
"CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
- LValue LV = MakeAddrLValue(Address, T);
+ // Limit the alignment to that of the result type.
+ if (ArrayAlignment) {
+ unsigned Align = getContext().getTypeAlignInChars(T).getQuantity();
+ ArrayAlignment = std::min(Align, ArrayAlignment);
+ }
+
+ LValue LV = MakeAddrLValue(Address, T, ArrayAlignment);
LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
if (getContext().getLangOptions().ObjC1 &&
@@ -1715,10 +1749,10 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
}
const Expr *condExpr = expr->getCond();
-
- if (int condValue = ConstantFoldsToSimpleInteger(condExpr)) {
+ bool CondExprBool;
+ if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
- if (condValue == -1) std::swap(live, dead);
+ if (!CondExprBool) std::swap(live, dead);
if (!ContainsLabel(dead))
return EmitLValue(live);
@@ -1756,9 +1790,8 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
EmitBlock(contBlock);
- llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(),
+ llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2,
"cond-lvalue");
- phi->reserveOperandSpace(2);
phi->addIncoming(lhs.getAddress(), lhsBlock);
phi->addIncoming(rhs.getAddress(), rhsBlock);
return MakeAddrLValue(phi, expr->getType());
@@ -1927,6 +1960,12 @@ LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue) {
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(E->getLocStart());
+ DI->UpdateLineDirectiveRegion(Builder);
+ DI->EmitStopPoint(Builder);
+ }
+
// Builtins never have block type.
if (E->getCallee()->getType()->isBlockPointerType())
return EmitBlockCallExpr(E, ReturnValue);
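The repeated isSignedOverflowDefined() checks in EmitArraySubscriptExpr mean that under -fwrapv (signed overflow defined) subscripts are lowered as plain getelementptr rather than getelementptr inbounds, so a wrapping signed index no longer carries inbounds undefined behavior at the IR level. The affected source pattern is simply:

    // With -fwrapv this indexes via a plain GEP; otherwise an inbounds GEP,
    // which lets later passes assume the address stays inside the object.
    int pick(int *p, int i) { return p[i]; }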
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index f992dc7..29c7688 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -81,6 +81,9 @@ public:
CGF.ErrorUnsupported(S, "aggregate expression");
}
void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
+ void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
+ Visit(GE->getResultExpr());
+ }
void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
// l-values.
@@ -179,11 +182,9 @@ bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
/// move will be performed.
void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) {
if (Dest.requiresGCollection()) {
- std::pair<uint64_t, unsigned> TypeInfo =
- CGF.getContext().getTypeInfo(E->getType());
- unsigned long size = TypeInfo.first/8;
+ CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
- llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
+ llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, Dest.getAddr(),
Src.getAggregateAddr(),
SizeVal);
@@ -212,11 +213,9 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
}
if (Dest.requiresGCollection()) {
- std::pair<uint64_t, unsigned> TypeInfo =
- CGF.getContext().getTypeInfo(E->getType());
- unsigned long size = TypeInfo.first/8;
+ CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
- llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
+ llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
Dest.getAddr(),
Src.getAggregateAddr(),
@@ -249,11 +248,6 @@ void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
}
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
- if (Dest.isIgnored() && E->getCastKind() != CK_Dynamic) {
- Visit(E->getSubExpr());
- return;
- }
-
switch (E->getCastKind()) {
case CK_Dynamic: {
assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
@@ -270,6 +264,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
}
case CK_ToUnion: {
+ if (Dest.isIgnored()) break;
+
// GCC union extension
QualType Ty = E->getSubExpr()->getType();
QualType PtrTy = CGF.getContext().getPointerType(Ty);
@@ -309,7 +305,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_LValueBitCast:
llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
break;
-
+
case CK_Dependent:
case CK_BitCast:
case CK_ArrayToPointerDecay:
@@ -397,15 +393,38 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
E->getRHS()->getType())
&& "Invalid assignment");
- // FIXME: __block variables need the RHS evaluated first!
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->getLHS()))
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (VD->hasAttr<BlocksAttr>() &&
+ E->getRHS()->HasSideEffects(CGF.getContext())) {
+ // When __block variable on LHS, the RHS must be evaluated first
+ // as it may change the 'forwarding' field via call to Block_copy.
+ LValue RHS = CGF.EmitLValue(E->getRHS());
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+ bool GCollection = false;
+ if (CGF.getContext().getLangOptions().getGCMode())
+ GCollection = TypeRequiresGCollection(E->getLHS()->getType());
+ Dest = AggValueSlot::forLValue(LHS, true, GCollection);
+ EmitFinalDestCopy(E, RHS, true);
+ return;
+ }
+
LValue LHS = CGF.EmitLValue(E->getLHS());
// We have to special case property setters, otherwise we must have
// a simple lvalue (no aggregates inside vectors, bitfields).
if (LHS.isPropertyRef()) {
- AggValueSlot Slot = EnsureSlot(E->getRHS()->getType());
- CGF.EmitAggExpr(E->getRHS(), Slot);
- CGF.EmitStoreThroughPropertyRefLValue(Slot.asRValue(), LHS);
+ const ObjCPropertyRefExpr *RE = LHS.getPropertyRefExpr();
+ QualType ArgType = RE->getSetterArgType();
+ RValue Src;
+ if (ArgType->isReferenceType())
+ Src = CGF.EmitReferenceBindingToExpr(E->getRHS(), 0);
+ else {
+ AggValueSlot Slot = EnsureSlot(E->getRHS()->getType());
+ CGF.EmitAggExpr(E->getRHS(), Slot);
+ Src = Slot.asRValue();
+ }
+ CGF.EmitStoreThroughPropertyRefLValue(Src, LHS);
} else {
bool GCollection = false;
if (CGF.getContext().getLangOptions().getGCMode())
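The VisitBinAssign hunk above fixes evaluation order for aggregate assignment into a __block variable: a side-effecting RHS may Block_copy the variable and redirect its __forwarding pointer, so the RHS must be evaluated before the LHS address is taken. A sketch of the pattern being fixed (requires -fblocks; the names are hypothetical):

    struct S { int a, b; };
    S copyAndReturn(void (^blk)(void)); // may copy blk, moving 's' to the heap
    void demo() {
      __block S s = {};
      // After this patch the call is evaluated first, so the store goes
      // through the updated __forwarding pointer, not the stale stack copy.
      s = copyAndReturn(^{ (void)s.a; });
    }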
@@ -513,9 +532,8 @@ void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
/// zero to memory, return true. This can return false if uncertain, so it just
/// handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
- // (0)
- if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
- return isSimpleZero(PE->getSubExpr(), CGF);
+ E = E->IgnoreParens();
+
// 0
if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
return IL->getValue() == 0;
@@ -619,6 +637,14 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
QualType ElementType = CGF.getContext().getCanonicalType(E->getType());
ElementType = CGF.getContext().getAsArrayType(ElementType)->getElementType();
+ bool hasNonTrivialCXXConstructor = false;
+ if (CGF.getContext().getLangOptions().CPlusPlus)
+ if (const RecordType *RT = CGF.getContext()
+ .getBaseElementType(ElementType)->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ hasNonTrivialCXXConstructor = !RD->hasTrivialConstructor();
+ }
+
// FIXME: were we intentionally ignoring address spaces and GC attributes?
for (uint64_t i = 0; i != NumArrayElements; ++i) {
@@ -626,7 +652,8 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// then we're done.
if (i == NumInitElements &&
Dest.isZeroed() &&
- CGF.getTypes().isZeroInitializable(ElementType))
+ CGF.getTypes().isZeroInitializable(ElementType) &&
+ !hasNonTrivialCXXConstructor)
break;
llvm::Value *NextVal = Builder.CreateStructGEP(DestPtr, i, ".array");
@@ -634,6 +661,8 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (i < NumInitElements)
EmitInitializationToLValue(E->getInit(i), LV, ElementType);
+ else if (Expr *filler = E->getArrayFiller())
+ EmitInitializationToLValue(filler, LV, ElementType);
else
EmitNullInitializationToLValue(LV, ElementType);
@@ -737,18 +766,17 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
-static uint64_t GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
- if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
- return GetNumNonZeroBytesInInit(PE->getSubExpr(), CGF);
+static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
+ E = E->IgnoreParens();
// 0 and 0.0 won't require any non-zero stores!
- if (isSimpleZero(E, CGF)) return 0;
+ if (isSimpleZero(E, CGF)) return CharUnits::Zero();
// If this is an initlist expr, sum up the sizes of the (present)
// elements. If this is something weird, assume the whole thing is non-zero.
const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
if (ILE == 0 || !CGF.getTypes().isZeroInitializable(ILE->getType()))
- return CGF.getContext().getTypeSize(E->getType())/8;
+ return CGF.getContext().getTypeSizeInChars(E->getType());
// InitListExprs for structs have to be handled carefully. If there are
// reference members, we need to consider the size of the reference, not the
@@ -756,7 +784,7 @@ static uint64_t GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
if (!RT->isUnionType()) {
RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
- uint64_t NumNonZeroBytes = 0;
+ CharUnits NumNonZeroBytes = CharUnits::Zero();
unsigned ILEElement = 0;
for (RecordDecl::field_iterator Field = SD->field_begin(),
@@ -773,7 +801,8 @@ static uint64_t GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
// Reference values are always non-null and have the width of a pointer.
if (Field->getType()->isReferenceType())
- NumNonZeroBytes += CGF.getContext().Target.getPointerWidth(0);
+ NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
+ CGF.getContext().Target.getPointerWidth(0));
else
NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
}
@@ -783,7 +812,7 @@ static uint64_t GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
}
- uint64_t NumNonZeroBytes = 0;
+ CharUnits NumNonZeroBytes = CharUnits::Zero();
for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
return NumNonZeroBytes;
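
A worked example of the estimate this helper computes (a sketch; the struct,
its initializer, and the sizes assume a typical 64-bit target):

  struct S { int a; char b; double c; };
  // For `S s = { 1, 0, 0.0 };` the recursion counts:
  //   a = 1   -> not a simple zero: getTypeSizeInChars(int) = 4 chars
  //   b = 0   -> isSimpleZero      -> CharUnits::Zero()
  //   c = 0.0 -> isSimpleZero      -> CharUnits::Zero()
  // so NumNonZeroBytes == 4 out of sizeof(S) == 16.
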
@@ -797,28 +826,38 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
// If the slot is already known to be zeroed, nothing to do. Don't mess with
// volatile stores.
if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;
-
+
+  // C++ objects with a user-declared constructor don't need zeroing.
+ if (CGF.getContext().getLangOptions().CPlusPlus)
+ if (const RecordType *RT = CGF.getContext()
+ .getBaseElementType(E->getType())->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->hasUserDeclaredConstructor())
+ return;
+ }
+
// If the type is 16-bytes or smaller, prefer individual stores over memset.
- std::pair<uint64_t, unsigned> TypeInfo =
- CGF.getContext().getTypeInfo(E->getType());
- if (TypeInfo.first/8 <= 16)
+ std::pair<CharUnits, CharUnits> TypeInfo =
+ CGF.getContext().getTypeInfoInChars(E->getType());
+ if (TypeInfo.first <= CharUnits::fromQuantity(16))
return;
  // Check to see if over 3/4 of the initializer is known to be zero. If so,
// we prefer to emit memset + individual stores for the rest.
- uint64_t NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
- if (NumNonZeroBytes*4 > TypeInfo.first/8)
+ CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
+ if (NumNonZeroBytes*4 > TypeInfo.first)
return;
// Okay, it seems like a good idea to use an initial memset, emit the call.
- llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first/8);
- unsigned Align = TypeInfo.second/8;
+ llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
+ CharUnits Align = TypeInfo.second;
llvm::Value *Loc = Slot.getAddr();
const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
Loc = CGF.Builder.CreateBitCast(Loc, BP);
- CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, Align, false);
+ CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
+ Align.getQuantity(), false);
// Tell the AggExprEmitter that the slot is known zero.
Slot.setZeroed();
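
The heuristic above in source terms (a sketch; `Big` and the byte counts are
illustrative, assuming a 4-byte int):

  struct Big { int first; int rest[63]; };   // 256 bytes
  Big b = { 5 };
  // GetNumNonZeroBytesInInit returns 4, and 4*4 <= 256, so codegen emits one
  // memset(&b, 0, 256) plus a single store of 5 instead of 64 element
  // stores. Types of 16 bytes or less never take this path.
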
@@ -887,7 +926,8 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
// safely handle this, we can add a target hook.
// Get size and alignment info for this aggregate.
- std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
+ std::pair<CharUnits, CharUnits> TypeInfo =
+ getContext().getTypeInfoInChars(Ty);
// FIXME: Handle variable sized types.
@@ -917,9 +957,9 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
RecordDecl *Record = RecordTy->getDecl();
if (Record->hasObjectMember()) {
- unsigned long size = TypeInfo.first/8;
+ CharUnits size = TypeInfo.first;
const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
- llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
+ llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
SizeVal);
return;
@@ -928,9 +968,10 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
QualType BaseType = getContext().getBaseElementType(Ty);
if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
if (RecordTy->getDecl()->hasObjectMember()) {
- unsigned long size = TypeInfo.first/8;
+ CharUnits size = TypeInfo.first;
const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
- llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
+ llvm::Value *SizeVal =
+ llvm::ConstantInt::get(SizeTy, size.getQuantity());
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
SizeVal);
return;
@@ -939,6 +980,7 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
}
Builder.CreateMemCpy(DestPtr, SrcPtr,
- llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8),
- TypeInfo.second/8, isVolatile);
+ llvm::ConstantInt::get(IntPtrTy,
+ TypeInfo.first.getQuantity()),
+ TypeInfo.second.getQuantity(), isVolatile);
}
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index bba7864..bdaa873 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -17,6 +17,8 @@
#include "CGObjCRuntime.h"
#include "CGDebugInfo.h"
#include "llvm/Intrinsics.h"
+#include "llvm/Support/CallSite.h"
+
using namespace clang;
using namespace CodeGen;
@@ -35,13 +37,12 @@ RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
CallArgList Args;
// Push the this ptr.
- Args.push_back(std::make_pair(RValue::get(This),
- MD->getThisType(getContext())));
+ Args.add(RValue::get(This), MD->getThisType(getContext()));
// If there is a VTT parameter, emit it.
if (VTT) {
QualType T = getContext().getPointerType(getContext().VoidPtrTy);
- Args.push_back(std::make_pair(RValue::get(VTT), T));
+ Args.add(RValue::get(VTT), T);
}
// And the rest of the call args
@@ -77,6 +78,31 @@ static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
}
+// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
+// quite what we want.
+static const Expr *skipNoOpCastsAndParens(const Expr *E) {
+ while (true) {
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
+ E = PE->getSubExpr();
+ continue;
+ }
+
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if (CE->getCastKind() == CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ }
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ if (UO->getOpcode() == UO_Extension) {
+ E = UO->getSubExpr();
+ continue;
+ }
+ }
+ return E;
+ }
+}
+
/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
/// expr can be devirtualized.
static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
@@ -112,6 +138,7 @@ static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
if (MD->getParent()->hasAttr<FinalAttr>())
return true;
+ Base = skipNoOpCastsAndParens(Base);
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
// This is a record decl. We know the type and can devirtualize it.
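
A call shape the added skipNoOpCastsAndParens() handling unblocks (a sketch):

  struct B { virtual int f(); };
  int g() {
    B b;
    return (b).f();  // the ParenExpr around the DeclRefExpr no longer hides
  }                  // that b's static and dynamic types match, so the call
                     // is emitted directly rather than through the vtable
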
@@ -141,10 +168,12 @@ static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
// extensions allowing explicit constructor function call.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
ReturnValueSlot ReturnValue) {
- if (isa<BinaryOperator>(CE->getCallee()->IgnoreParens()))
+ const Expr *callee = CE->getCallee()->IgnoreParens();
+
+ if (isa<BinaryOperator>(callee))
return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
-
- const MemberExpr *ME = cast<MemberExpr>(CE->getCallee()->IgnoreParens());
+
+ const MemberExpr *ME = cast<MemberExpr>(callee);
const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
CGDebugInfo *DI = getDebugInfo();
@@ -259,10 +288,10 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
const Expr *MemFnExpr = BO->getRHS();
const MemberPointerType *MPT =
- MemFnExpr->getType()->getAs<MemberPointerType>();
+ MemFnExpr->getType()->castAs<MemberPointerType>();
const FunctionProtoType *FPT =
- MPT->getPointeeType()->getAs<FunctionProtoType>();
+ MPT->getPointeeType()->castAs<FunctionProtoType>();
const CXXRecordDecl *RD =
cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
@@ -287,12 +316,11 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
getContext().getPointerType(getContext().getTagDeclType(RD));
// Push the this ptr.
- Args.push_back(std::make_pair(RValue::get(This), ThisType));
+ Args.add(RValue::get(This), ThisType);
// And the rest of the call args
EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
- const FunctionType *BO_FPT = BO->getType()->getAs<FunctionProtoType>();
- return EmitCall(CGM.getTypes().getFunctionInfo(Args, BO_FPT), Callee,
+ return EmitCall(CGM.getTypes().getFunctionInfo(Args, FPT), Callee,
ReturnValue, Args);
}
@@ -341,8 +369,9 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
// If we require zero initialization before (or instead of) calling the
// constructor, as can be the case with a non-user-provided default
- // constructor, emit the zero initialization now.
- if (E->requiresZeroInitialization())
+ // constructor, emit the zero initialization now, unless destination is
+ // already zeroed.
+ if (E->requiresZeroInitialization() && !Dest.isZeroed())
EmitNullInitialization(Dest.getAddr(), E->getType());
// If this is a call to a trivial default constructor, do nothing.
@@ -374,9 +403,16 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
E->arg_begin(), E->arg_end());
}
else {
- CXXCtorType Type =
- (E->getConstructionKind() == CXXConstructExpr::CK_Complete)
- ? Ctor_Complete : Ctor_Base;
+ CXXCtorType Type;
+ CXXConstructExpr::ConstructionKind K = E->getConstructionKind();
+ if (K == CXXConstructExpr::CK_Delegating) {
+ // We should be emitting a constructor; GlobalDecl will assert this
+ Type = CurGD.getCtorType();
+ } else {
+ Type = (E->getConstructionKind() == CXXConstructExpr::CK_Complete)
+ ? Ctor_Complete : Ctor_Base;
+ }
+
bool ForVirtualBase =
E->getConstructionKind() == CXXConstructExpr::CK_VirtualBase;
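
A C++0x delegating constructor that reaches the new CK_Delegating branch
(a sketch):

  struct Widget {
    int n;
    Widget(int v) : n(v) {}
    Widget() : Widget(42) {}  // delegates: emitted with the ctor type of the
  };                          // constructor currently being generated (CurGD)
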
@@ -595,7 +631,7 @@ static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
Size = CGF.Builder.CreateExtractValue(AddRes, 0);
llvm::Value *AddDidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
- DidOverflow = CGF.Builder.CreateAnd(DidOverflow, AddDidOverflow);
+ DidOverflow = CGF.Builder.CreateOr(DidOverflow, AddDidOverflow);
}
Size = CGF.Builder.CreateSelect(DidOverflow,
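
The And->Or change is a correctness fix: any single overflowing step must
taint the combined flag. The same accumulation in a standalone sketch
(hypothetical helper; the overflow builtins are the ones newer GCC/Clang
provide):

  #include <cstddef>
  #include <cstdint>
  size_t allocationSize(size_t n, size_t elemSize, size_t cookie) {
    size_t bytes = 0;
    bool overflowed = false;
    overflowed |= __builtin_mul_overflow(n, elemSize, &bytes);
    overflowed |= __builtin_add_overflow(bytes, cookie, &bytes);
    // with '&' a clean add would have masked an overflowing multiply
    return overflowed ? SIZE_MAX : bytes;  // oversized request fails cleanly
  }
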
@@ -800,15 +836,15 @@ namespace {
// The first argument is always a void*.
FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
- DeleteArgs.push_back(std::make_pair(RValue::get(Ptr), *AI++));
+ DeleteArgs.add(RValue::get(Ptr), *AI++);
// A member 'operator delete' can take an extra 'size_t' argument.
if (FPT->getNumArgs() == NumPlacementArgs + 2)
- DeleteArgs.push_back(std::make_pair(RValue::get(AllocSize), *AI++));
+ DeleteArgs.add(RValue::get(AllocSize), *AI++);
// Pass the rest of the arguments, which must match exactly.
for (unsigned I = 0; I != NumPlacementArgs; ++I)
- DeleteArgs.push_back(std::make_pair(getPlacementArgs()[I], *AI++));
+ DeleteArgs.add(getPlacementArgs()[I], *AI++);
// Call 'operator delete'.
CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
@@ -857,18 +893,18 @@ namespace {
// The first argument is always a void*.
FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
- DeleteArgs.push_back(std::make_pair(Ptr.restore(CGF), *AI++));
+ DeleteArgs.add(Ptr.restore(CGF), *AI++);
// A member 'operator delete' can take an extra 'size_t' argument.
if (FPT->getNumArgs() == NumPlacementArgs + 2) {
RValue RV = AllocSize.restore(CGF);
- DeleteArgs.push_back(std::make_pair(RV, *AI++));
+ DeleteArgs.add(RV, *AI++);
}
// Pass the rest of the arguments, which must match exactly.
for (unsigned I = 0; I != NumPlacementArgs; ++I) {
RValue RV = getPlacementArgs()[I].restore(CGF);
- DeleteArgs.push_back(std::make_pair(RV, *AI++));
+ DeleteArgs.add(RV, *AI++);
}
// Call 'operator delete'.
@@ -895,7 +931,7 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
E->getOperatorDelete(),
NewPtr, AllocSize);
for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
- Cleanup->setPlacementArg(I, NewArgs[I+1].first);
+ Cleanup->setPlacementArg(I, NewArgs[I+1].RV);
return;
}
@@ -914,148 +950,154 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
SavedAllocSize);
for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
Cleanup->setPlacementArg(I,
- DominatingValue<RValue>::save(CGF, NewArgs[I+1].first));
+ DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));
CGF.ActivateCleanupBlock(CGF.EHStack.stable_begin());
}
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
- QualType AllocType = E->getAllocatedType();
- if (AllocType->isArrayType())
- while (const ArrayType *AType = getContext().getAsArrayType(AllocType))
- AllocType = AType->getElementType();
+ // The element type being allocated.
+ QualType allocType = getContext().getBaseElementType(E->getAllocatedType());
- FunctionDecl *NewFD = E->getOperatorNew();
- const FunctionProtoType *NewFTy = NewFD->getType()->getAs<FunctionProtoType>();
+ // 1. Build a call to the allocation function.
+ FunctionDecl *allocator = E->getOperatorNew();
+ const FunctionProtoType *allocatorType =
+ allocator->getType()->castAs<FunctionProtoType>();
- CallArgList NewArgs;
+ CallArgList allocatorArgs;
// The allocation size is the first argument.
- QualType SizeTy = getContext().getSizeType();
+ QualType sizeType = getContext().getSizeType();
- llvm::Value *NumElements = 0;
- llvm::Value *AllocSizeWithoutCookie = 0;
- llvm::Value *AllocSize = EmitCXXNewAllocSize(getContext(),
- *this, E, NumElements,
- AllocSizeWithoutCookie);
+ llvm::Value *numElements = 0;
+ llvm::Value *allocSizeWithoutCookie = 0;
+ llvm::Value *allocSize =
+ EmitCXXNewAllocSize(getContext(), *this, E, numElements,
+ allocSizeWithoutCookie);
- NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));
+ allocatorArgs.add(RValue::get(allocSize), sizeType);
// Emit the rest of the arguments.
// FIXME: Ideally, this should just use EmitCallArgs.
- CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();
+ CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();
// First, use the types from the function type.
// We start at 1 here because the first argument (the allocation size)
// has already been emitted.
- for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
- QualType ArgType = NewFTy->getArgType(i);
+ for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
+ ++i, ++placementArg) {
+ QualType argType = allocatorType->getArgType(i);
- assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
- getTypePtr() ==
- getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
+ assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
+ placementArg->getType()) &&
"type mismatch in call argument!");
- NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
- ArgType));
-
+ EmitCallArg(allocatorArgs, *placementArg, argType);
}
// Either we've emitted all the call args, or we have a call to a
// variadic function.
- assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
- "Extra arguments in non-variadic function!");
+ assert((placementArg == E->placement_arg_end() ||
+ allocatorType->isVariadic()) &&
+ "Extra arguments to non-variadic function!");
// If we still have any arguments, emit them using the type of the argument.
- for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
- NewArg != NewArgEnd; ++NewArg) {
- QualType ArgType = NewArg->getType();
- NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
- ArgType));
+ for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
+ placementArg != placementArgsEnd; ++placementArg) {
+ EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
}
- // Emit the call to new.
+ // Emit the allocation call.
RValue RV =
- EmitCall(CGM.getTypes().getFunctionInfo(NewArgs, NewFTy),
- CGM.GetAddrOfFunction(NewFD), ReturnValueSlot(), NewArgs, NewFD);
-
- // If an allocation function is declared with an empty exception specification
- // it returns null to indicate failure to allocate storage. [expr.new]p13.
- // (We don't need to check for null when there's no new initializer and
- // we're allocating a POD type).
- bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
- !(AllocType->isPODType() && !E->hasInitializer());
-
- llvm::BasicBlock *NullCheckSource = 0;
- llvm::BasicBlock *NewNotNull = 0;
- llvm::BasicBlock *NewEnd = 0;
-
- llvm::Value *NewPtr = RV.getScalarVal();
- unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
-
- if (NullCheckResult) {
- NullCheckSource = Builder.GetInsertBlock();
- NewNotNull = createBasicBlock("new.notnull");
- NewEnd = createBasicBlock("new.end");
-
- llvm::Value *IsNull = Builder.CreateIsNull(NewPtr, "new.isnull");
- Builder.CreateCondBr(IsNull, NewEnd, NewNotNull);
- EmitBlock(NewNotNull);
+ EmitCall(CGM.getTypes().getFunctionInfo(allocatorArgs, allocatorType),
+ CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
+ allocatorArgs, allocator);
+
+ // Emit a null check on the allocation result if the allocation
+ // function is allowed to return null (because it has a non-throwing
+ // exception spec; for this part, we inline
+ // CXXNewExpr::shouldNullCheckAllocation()) and we have an
+ // interesting initializer.
+ bool nullCheck = allocatorType->isNothrow(getContext()) &&
+ !(allocType->isPODType() && !E->hasInitializer());
+
+ llvm::BasicBlock *nullCheckBB = 0;
+ llvm::BasicBlock *contBB = 0;
+
+ llvm::Value *allocation = RV.getScalarVal();
+ unsigned AS =
+ cast<llvm::PointerType>(allocation->getType())->getAddressSpace();
+
+ // The null-check means that the initializer is conditionally
+ // evaluated.
+ ConditionalEvaluation conditional(*this);
+
+ if (nullCheck) {
+ conditional.begin(*this);
+
+ nullCheckBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
+ contBB = createBasicBlock("new.cont");
+
+ llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
+ Builder.CreateCondBr(isNull, contBB, notNullBB);
+ EmitBlock(notNullBB);
}
- assert((AllocSize == AllocSizeWithoutCookie) ==
+ assert((allocSize == allocSizeWithoutCookie) ==
CalculateCookiePadding(*this, E).isZero());
- if (AllocSize != AllocSizeWithoutCookie) {
+ if (allocSize != allocSizeWithoutCookie) {
assert(E->isArray());
- NewPtr = CGM.getCXXABI().InitializeArrayCookie(*this, NewPtr, NumElements,
- E, AllocType);
+ allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
+ numElements,
+ E, allocType);
}
// If there's an operator delete, enter a cleanup to call it if an
// exception is thrown.
- EHScopeStack::stable_iterator CallOperatorDelete;
+ EHScopeStack::stable_iterator operatorDeleteCleanup;
if (E->getOperatorDelete()) {
- EnterNewDeleteCleanup(*this, E, NewPtr, AllocSize, NewArgs);
- CallOperatorDelete = EHStack.stable_begin();
+ EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
+ operatorDeleteCleanup = EHStack.stable_begin();
}
- const llvm::Type *ElementPtrTy
- = ConvertTypeForMem(AllocType)->getPointerTo(AS);
- NewPtr = Builder.CreateBitCast(NewPtr, ElementPtrTy);
+ const llvm::Type *elementPtrTy
+ = ConvertTypeForMem(allocType)->getPointerTo(AS);
+ llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);
if (E->isArray()) {
- EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
+ EmitNewInitializer(*this, E, result, numElements, allocSizeWithoutCookie);
    // result is a pointer to the base element type. If we're
// allocating an array of arrays, we'll need to cast back to the
// array pointer type.
- const llvm::Type *ResultTy = ConvertTypeForMem(E->getType());
- if (NewPtr->getType() != ResultTy)
- NewPtr = Builder.CreateBitCast(NewPtr, ResultTy);
+ const llvm::Type *resultType = ConvertTypeForMem(E->getType());
+ if (result->getType() != resultType)
+ result = Builder.CreateBitCast(result, resultType);
} else {
- EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
+ EmitNewInitializer(*this, E, result, numElements, allocSizeWithoutCookie);
}
// Deactivate the 'operator delete' cleanup if we finished
// initialization.
- if (CallOperatorDelete.isValid())
- DeactivateCleanupBlock(CallOperatorDelete);
+ if (operatorDeleteCleanup.isValid())
+ DeactivateCleanupBlock(operatorDeleteCleanup);
- if (NullCheckResult) {
- Builder.CreateBr(NewEnd);
- llvm::BasicBlock *NotNullSource = Builder.GetInsertBlock();
- EmitBlock(NewEnd);
-
- llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
- PHI->reserveOperandSpace(2);
- PHI->addIncoming(NewPtr, NotNullSource);
- PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()),
- NullCheckSource);
-
- NewPtr = PHI;
+ if (nullCheck) {
+ conditional.end(*this);
+
+ llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
+ EmitBlock(contBB);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
+ PHI->addIncoming(result, notNullBB);
+ PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
+ nullCheckBB);
+
+ result = PHI;
}
- return NewPtr;
+ return result;
}
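
A source pattern that takes the new null-checked path (a sketch):

  #include <new>
  struct T { T(); int x; };
  T *make() {
    // operator new(size_t, const std::nothrow_t&) has a non-throwing
    // exception spec and T is non-POD, so the branch above is emitted and
    // T::T() runs only on the "new.notnull" edge; a null allocation flows
    // through the phi in "new.cont" unchanged.
    return new (std::nothrow) T;
  }
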
void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
@@ -1080,10 +1122,10 @@ void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
QualType ArgTy = DeleteFTy->getArgType(0);
llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
- DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy));
+ DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
if (Size)
- DeleteArgs.push_back(std::make_pair(RValue::get(Size), SizeTy));
+ DeleteArgs.add(RValue::get(Size), SizeTy);
// Emit the call to delete.
EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
@@ -1180,7 +1222,7 @@ namespace {
QualType VoidPtrTy = DeleteFTy->getArgType(0);
llvm::Value *DeletePtr
= CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
- Args.push_back(std::make_pair(RValue::get(DeletePtr), VoidPtrTy));
+ Args.add(RValue::get(DeletePtr), VoidPtrTy);
// Pass the original requested size as the second argument.
if (DeleteFTy->getNumArgs() == 2) {
@@ -1203,7 +1245,7 @@ namespace {
Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
}
- Args.push_back(std::make_pair(RValue::get(Size), size_t));
+ Args.add(RValue::get(Size), size_t);
}
// Emit the call to delete.
@@ -1264,9 +1306,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
- llvm::Value *IsNull =
- Builder.CreateICmpEQ(Ptr, llvm::Constant::getNullValue(Ptr->getType()),
- "isnull");
+ llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");
Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
EmitBlock(DeleteNotNull);
@@ -1306,173 +1346,253 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
EmitBlock(DeleteEnd);
}
+static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
+ // void __cxa_bad_typeid();
+
+ const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(VoidTy, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
+}
+
+static void EmitBadTypeidCall(CodeGenFunction &CGF) {
+ llvm::Value *Fn = getBadTypeidFn(CGF);
+ CGF.EmitCallOrInvoke(Fn, 0, 0).setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+}
+
+static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
+ const Expr *E,
+ const llvm::Type *StdTypeInfoPtrTy) {
+ // Get the vtable pointer.
+ llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();
+
+ // C++ [expr.typeid]p2:
+ // If the glvalue expression is obtained by applying the unary * operator to
+ // a pointer and the pointer is a null pointer value, the typeid expression
+ // throws the std::bad_typeid exception.
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
+ if (UO->getOpcode() == UO_Deref) {
+ llvm::BasicBlock *BadTypeidBlock =
+ CGF.createBasicBlock("typeid.bad_typeid");
+ llvm::BasicBlock *EndBlock =
+ CGF.createBasicBlock("typeid.end");
+
+ llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
+ CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);
+
+ CGF.EmitBlock(BadTypeidBlock);
+ EmitBadTypeidCall(CGF);
+ CGF.EmitBlock(EndBlock);
+ }
+ }
+
+ llvm::Value *Value = CGF.GetVTablePtr(ThisPtr,
+ StdTypeInfoPtrTy->getPointerTo());
+
+ // Load the type info.
+ Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
+ return CGF.Builder.CreateLoad(Value);
+}
+
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
- QualType Ty = E->getType();
- const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();
+ const llvm::Type *StdTypeInfoPtrTy =
+ ConvertType(E->getType())->getPointerTo();
if (E->isTypeOperand()) {
llvm::Constant *TypeInfo =
CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
- return Builder.CreateBitCast(TypeInfo, LTy);
+ return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
}
-
- Expr *subE = E->getExprOperand();
- Ty = subE->getType();
- CanQualType CanTy = CGM.getContext().getCanonicalType(Ty);
- Ty = CanTy.getUnqualifiedType().getNonReferenceType();
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- if (RD->isPolymorphic()) {
- // FIXME: if subE is an lvalue do
- LValue Obj = EmitLValue(subE);
- llvm::Value *This = Obj.getAddress();
- // We need to do a zero check for *p, unless it has NonNullAttr.
- // FIXME: PointerType->hasAttr<NonNullAttr>()
- bool CanBeZero = false;
- if (UnaryOperator *UO = dyn_cast<UnaryOperator>(subE->IgnoreParens()))
- if (UO->getOpcode() == UO_Deref)
- CanBeZero = true;
- if (CanBeZero) {
- llvm::BasicBlock *NonZeroBlock = createBasicBlock();
- llvm::BasicBlock *ZeroBlock = createBasicBlock();
-
- llvm::Value *Zero = llvm::Constant::getNullValue(This->getType());
- Builder.CreateCondBr(Builder.CreateICmpNE(This, Zero),
- NonZeroBlock, ZeroBlock);
- EmitBlock(ZeroBlock);
- /// Call __cxa_bad_typeid
- const llvm::Type *ResultType = llvm::Type::getVoidTy(getLLVMContext());
- const llvm::FunctionType *FTy;
- FTy = llvm::FunctionType::get(ResultType, false);
- llvm::Value *F = CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
- Builder.CreateCall(F)->setDoesNotReturn();
- Builder.CreateUnreachable();
- EmitBlock(NonZeroBlock);
- }
- llvm::Value *V = GetVTablePtr(This, LTy->getPointerTo());
- V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL);
- V = Builder.CreateLoad(V);
- return V;
+
+ // C++ [expr.typeid]p2:
+ // When typeid is applied to a glvalue expression whose type is a
+ // polymorphic class type, the result refers to a std::type_info object
+ // representing the type of the most derived object (that is, the dynamic
+ // type) to which the glvalue refers.
+ if (E->getExprOperand()->isGLValue()) {
+ if (const RecordType *RT =
+ E->getExprOperand()->getType()->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->isPolymorphic())
+ return EmitTypeidFromVTable(*this, E->getExprOperand(),
+ StdTypeInfoPtrTy);
}
}
- return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(Ty), LTy);
+
+ QualType OperandTy = E->getExprOperand()->getType();
+ return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
+ StdTypeInfoPtrTy);
}
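
The two typeid forms this rewrite distinguishes, at the source level
(a sketch):

  #include <typeinfo>
  struct Poly { virtual ~Poly(); };
  const std::type_info &staticForm() { return typeid(int); }  // RTTI constant
  const std::type_info &dynamicForm(Poly *p) {
    return typeid(*p);  // polymorphic glvalue: loaded from vtable slot -1;
  }                     // if p is null, __cxa_bad_typeid() raises
                        // std::bad_typeid per [expr.typeid]p2
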
-llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
- const CXXDynamicCastExpr *DCE) {
- QualType SrcTy = DCE->getSubExpr()->getType();
- QualType DestTy = DCE->getTypeAsWritten();
- QualType InnerType = DestTy->getPointeeType();
+static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
+ // void *__dynamic_cast(const void *sub,
+ // const abi::__class_type_info *src,
+ // const abi::__class_type_info *dst,
+ // std::ptrdiff_t src2dst_offset);
- const llvm::Type *LTy = ConvertType(DCE->getType());
-
- bool CanBeZero = false;
- bool ToVoid = false;
- bool ThrowOnBad = false;
- if (DestTy->isPointerType()) {
- // FIXME: if PointerType->hasAttr<NonNullAttr>(), we don't set this
- CanBeZero = true;
- if (InnerType->isVoidType())
- ToVoid = true;
- } else {
- LTy = LTy->getPointerTo();
-
- // FIXME: What if exceptions are disabled?
- ThrowOnBad = true;
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ const llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+
+ const llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(Int8PtrTy, Args, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast");
+}
+
+static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
+ // void __cxa_bad_cast();
+
+ const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(VoidTy, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
+}
+
+static void EmitBadCastCall(CodeGenFunction &CGF) {
+ llvm::Value *Fn = getBadCastFn(CGF);
+ CGF.EmitCallOrInvoke(Fn, 0, 0).setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+}
+
+static llvm::Value *
+EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
+ QualType SrcTy, QualType DestTy,
+ llvm::BasicBlock *CastEnd) {
+ const llvm::Type *PtrDiffLTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+ const llvm::Type *DestLTy = CGF.ConvertType(DestTy);
+
+ if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
+ if (PTy->getPointeeType()->isVoidType()) {
+ // C++ [expr.dynamic.cast]p7:
+ // If T is "pointer to cv void," then the result is a pointer to the
+ // most derived object pointed to by v.
+
+ // Get the vtable pointer.
+ llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());
+
+ // Get the offset-to-top from the vtable.
+ llvm::Value *OffsetToTop =
+ CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
+ OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");
+
+ // Finally, add the offset to the pointer.
+ Value = CGF.EmitCastToVoidPtr(Value);
+ Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
+
+ return CGF.Builder.CreateBitCast(Value, DestLTy);
+ }
}
- if (SrcTy->isPointerType() || SrcTy->isReferenceType())
- SrcTy = SrcTy->getPointeeType();
- SrcTy = SrcTy.getUnqualifiedType();
-
- if (DestTy->isPointerType() || DestTy->isReferenceType())
- DestTy = DestTy->getPointeeType();
- DestTy = DestTy.getUnqualifiedType();
-
- llvm::BasicBlock *ContBlock = createBasicBlock();
- llvm::BasicBlock *NullBlock = 0;
- llvm::BasicBlock *NonZeroBlock = 0;
- if (CanBeZero) {
- NonZeroBlock = createBasicBlock();
- NullBlock = createBasicBlock();
- Builder.CreateCondBr(Builder.CreateIsNotNull(V), NonZeroBlock, NullBlock);
- EmitBlock(NonZeroBlock);
+ QualType SrcRecordTy;
+ QualType DestRecordTy;
+
+ if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
+ SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
+ DestRecordTy = DestPTy->getPointeeType();
+ } else {
+ SrcRecordTy = SrcTy;
+ DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
}
- llvm::BasicBlock *BadCastBlock = 0;
+ assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
+ assert(DestRecordTy->isRecordType() && "dest type must be a record type!");
- const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());
+ llvm::Value *SrcRTTI =
+ CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
+ llvm::Value *DestRTTI =
+ CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
- // See if this is a dynamic_cast(void*)
- if (ToVoid) {
- llvm::Value *This = V;
- V = GetVTablePtr(This, PtrDiffTy->getPointerTo());
- V = Builder.CreateConstInBoundsGEP1_64(V, -2ULL);
- V = Builder.CreateLoad(V, "offset to top");
- This = EmitCastToVoidPtr(This);
- V = Builder.CreateInBoundsGEP(This, V);
- V = Builder.CreateBitCast(V, LTy);
- } else {
- /// Call __dynamic_cast
- const llvm::Type *ResultType = Int8PtrTy;
- const llvm::FunctionType *FTy;
- std::vector<const llvm::Type*> ArgTys;
- ArgTys.push_back(Int8PtrTy);
- ArgTys.push_back(Int8PtrTy);
- ArgTys.push_back(Int8PtrTy);
- ArgTys.push_back(PtrDiffTy);
- FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
-
- // FIXME: Calculate better hint.
- llvm::Value *hint = llvm::ConstantInt::get(PtrDiffTy, -1ULL);
-
- assert(SrcTy->isRecordType() && "Src type must be record type!");
- assert(DestTy->isRecordType() && "Dest type must be record type!");
-
- llvm::Value *SrcArg
- = CGM.GetAddrOfRTTIDescriptor(SrcTy.getUnqualifiedType());
- llvm::Value *DestArg
- = CGM.GetAddrOfRTTIDescriptor(DestTy.getUnqualifiedType());
-
- V = Builder.CreateBitCast(V, Int8PtrTy);
- V = Builder.CreateCall4(CGM.CreateRuntimeFunction(FTy, "__dynamic_cast"),
- V, SrcArg, DestArg, hint);
- V = Builder.CreateBitCast(V, LTy);
-
- if (ThrowOnBad) {
- BadCastBlock = createBasicBlock();
- Builder.CreateCondBr(Builder.CreateIsNotNull(V), ContBlock, BadCastBlock);
- EmitBlock(BadCastBlock);
- /// Invoke __cxa_bad_cast
- ResultType = llvm::Type::getVoidTy(getLLVMContext());
- const llvm::FunctionType *FBadTy;
- FBadTy = llvm::FunctionType::get(ResultType, false);
- llvm::Value *F = CGM.CreateRuntimeFunction(FBadTy, "__cxa_bad_cast");
- if (llvm::BasicBlock *InvokeDest = getInvokeDest()) {
- llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- Builder.CreateInvoke(F, Cont, InvokeDest)->setDoesNotReturn();
- EmitBlock(Cont);
- } else {
- // FIXME: Does this ever make sense?
- Builder.CreateCall(F)->setDoesNotReturn();
- }
- Builder.CreateUnreachable();
- }
+ // FIXME: Actually compute a hint here.
+ llvm::Value *OffsetHint = llvm::ConstantInt::get(PtrDiffLTy, -1ULL);
+
+ // Emit the call to __dynamic_cast.
+ Value = CGF.EmitCastToVoidPtr(Value);
+ Value = CGF.Builder.CreateCall4(getDynamicCastFn(CGF), Value,
+ SrcRTTI, DestRTTI, OffsetHint);
+ Value = CGF.Builder.CreateBitCast(Value, DestLTy);
+
+ /// C++ [expr.dynamic.cast]p9:
+ /// A failed cast to reference type throws std::bad_cast
+ if (DestTy->isReferenceType()) {
+ llvm::BasicBlock *BadCastBlock =
+ CGF.createBasicBlock("dynamic_cast.bad_cast");
+
+ llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
+ CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
+
+ CGF.EmitBlock(BadCastBlock);
+ EmitBadCastCall(CGF);
}
+
+ return Value;
+}
+
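
The pointer-to-cv-void fast path above, seen from the source (a sketch):

  struct A { virtual ~A(); };
  struct B : A { int payload; };
  void *mostDerived(A *a) {
    return dynamic_cast<void *>(a);  // no runtime call: the offset-to-top
  }                                  // entry at vtable slot -2 is added to a,
                                     // yielding the complete object's address
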
+static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
+ QualType DestTy) {
+ const llvm::Type *DestLTy = CGF.ConvertType(DestTy);
+ if (DestTy->isPointerType())
+ return llvm::Constant::getNullValue(DestLTy);
+
+ /// C++ [expr.dynamic.cast]p9:
+ /// A failed cast to reference type throws std::bad_cast
+ EmitBadCastCall(CGF);
+
+ CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
+ return llvm::UndefValue::get(DestLTy);
+}
+
+llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
+ const CXXDynamicCastExpr *DCE) {
+ QualType DestTy = DCE->getTypeAsWritten();
+
+ if (DCE->isAlwaysNull())
+ return EmitDynamicCastToNull(*this, DestTy);
+
+ QualType SrcTy = DCE->getSubExpr()->getType();
+
+ // C++ [expr.dynamic.cast]p4:
+ // If the value of v is a null pointer value in the pointer case, the result
+ // is the null pointer value of type T.
+ bool ShouldNullCheckSrcValue = SrcTy->isPointerType();
- if (CanBeZero) {
- Builder.CreateBr(ContBlock);
- EmitBlock(NullBlock);
- Builder.CreateBr(ContBlock);
+ llvm::BasicBlock *CastNull = 0;
+ llvm::BasicBlock *CastNotNull = 0;
+ llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");
+
+ if (ShouldNullCheckSrcValue) {
+ CastNull = createBasicBlock("dynamic_cast.null");
+ CastNotNull = createBasicBlock("dynamic_cast.notnull");
+
+ llvm::Value *IsNull = Builder.CreateIsNull(Value);
+ Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
+ EmitBlock(CastNotNull);
+ }
+
+ Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);
+
+ if (ShouldNullCheckSrcValue) {
+ EmitBranch(CastEnd);
+
+ EmitBlock(CastNull);
+ EmitBranch(CastEnd);
}
- EmitBlock(ContBlock);
- if (CanBeZero) {
- llvm::PHINode *PHI = Builder.CreatePHI(LTy);
- PHI->reserveOperandSpace(2);
- PHI->addIncoming(V, NonZeroBlock);
- PHI->addIncoming(llvm::Constant::getNullValue(LTy), NullBlock);
- V = PHI;
+
+ EmitBlock(CastEnd);
+
+ if (ShouldNullCheckSrcValue) {
+ llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
+ PHI->addIncoming(Value, CastNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
+
+ Value = PHI;
}
- return V;
+ return Value;
}
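
Both result shapes of the rewritten EmitDynamicCast (a sketch):

  struct A { virtual ~A(); };
  struct B : A { };
  B *asPtr(A *a) {
    return dynamic_cast<B *>(a);  // a null source short-circuits through the
  }                               // dynamic_cast.null block to a null result
  B &asRef(A &a) {
    return dynamic_cast<B &>(a);  // a reference target cannot be null, so a
  }                               // failed cast calls __cxa_bad_cast(), which
                                  // throws std::bad_cast ([expr.dynamic.cast]p9)
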
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 7b0292b..bd19586 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -108,6 +108,9 @@ public:
}
ComplexPairTy VisitExpr(Expr *S);
ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());}
+ ComplexPairTy VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
+ return Visit(GE->getResultExpr());
+ }
ComplexPairTy VisitImaginaryLiteral(const ImaginaryLiteral *IL);
// l-values.
@@ -668,14 +671,12 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
eval.end(CGF);
// Create a PHI node for the real part.
- llvm::PHINode *RealPN = Builder.CreatePHI(LHS.first->getType(), "cond.r");
- RealPN->reserveOperandSpace(2);
+ llvm::PHINode *RealPN = Builder.CreatePHI(LHS.first->getType(), 2, "cond.r");
RealPN->addIncoming(LHS.first, LHSBlock);
RealPN->addIncoming(RHS.first, RHSBlock);
// Create a PHI node for the imaginary part.
- llvm::PHINode *ImagPN = Builder.CreatePHI(LHS.first->getType(), "cond.i");
- ImagPN->reserveOperandSpace(2);
+ llvm::PHINode *ImagPN = Builder.CreatePHI(LHS.first->getType(), 2, "cond.i");
ImagPN->addIncoming(LHS.second, LHSBlock);
ImagPN->addIncoming(RHS.second, RHSBlock);
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index 40d7b6c..463b913 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -38,8 +38,8 @@ class ConstStructBuilder {
CodeGenFunction *CGF;
bool Packed;
- unsigned NextFieldOffsetInBytes;
- unsigned LLVMStructAlignment;
+ CharUnits NextFieldOffsetInChars;
+ CharUnits LLVMStructAlignment;
std::vector<llvm::Constant *> Elements;
public:
static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
@@ -47,8 +47,9 @@ public:
private:
ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
- : CGM(CGM), CGF(CGF), Packed(false), NextFieldOffsetInBytes(0),
- LLVMStructAlignment(1) { }
+ : CGM(CGM), CGF(CGF), Packed(false),
+ NextFieldOffsetInChars(CharUnits::Zero()),
+ LLVMStructAlignment(CharUnits::One()) { }
bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
llvm::Constant *InitExpr);
@@ -56,64 +57,71 @@ private:
void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
llvm::ConstantInt *InitExpr);
- void AppendPadding(uint64_t NumBytes);
+ void AppendPadding(CharUnits PadSize);
- void AppendTailPadding(uint64_t RecordSize);
+ void AppendTailPadding(CharUnits RecordSize);
void ConvertStructToPacked();
bool Build(InitListExpr *ILE);
- unsigned getAlignment(const llvm::Constant *C) const {
- if (Packed) return 1;
- return CGM.getTargetData().getABITypeAlignment(C->getType());
+ CharUnits getAlignment(const llvm::Constant *C) const {
+ if (Packed) return CharUnits::One();
+ return CharUnits::fromQuantity(
+ CGM.getTargetData().getABITypeAlignment(C->getType()));
}
- uint64_t getSizeInBytes(const llvm::Constant *C) const {
- return CGM.getTargetData().getTypeAllocSize(C->getType());
+ CharUnits getSizeInChars(const llvm::Constant *C) const {
+ return CharUnits::fromQuantity(
+ CGM.getTargetData().getTypeAllocSize(C->getType()));
}
};
bool ConstStructBuilder::
AppendField(const FieldDecl *Field, uint64_t FieldOffset,
llvm::Constant *InitCst) {
- uint64_t FieldOffsetInBytes = FieldOffset / 8;
- assert(NextFieldOffsetInBytes <= FieldOffsetInBytes
+ const ASTContext &Context = CGM.getContext();
+
+ CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(FieldOffset);
+
+ assert(NextFieldOffsetInChars <= FieldOffsetInChars
&& "Field offset mismatch!");
- unsigned FieldAlignment = getAlignment(InitCst);
+ CharUnits FieldAlignment = getAlignment(InitCst);
// Round up the field offset to the alignment of the field type.
- uint64_t AlignedNextFieldOffsetInBytes =
- llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);
+ CharUnits AlignedNextFieldOffsetInChars =
+ NextFieldOffsetInChars.RoundUpToAlignment(FieldAlignment);
- if (AlignedNextFieldOffsetInBytes > FieldOffsetInBytes) {
+ if (AlignedNextFieldOffsetInChars > FieldOffsetInChars) {
assert(!Packed && "Alignment is wrong even with a packed struct!");
// Convert the struct to a packed struct.
ConvertStructToPacked();
- AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
+ AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
}
- if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
+ if (AlignedNextFieldOffsetInChars < FieldOffsetInChars) {
// We need to append padding.
- AppendPadding(FieldOffsetInBytes - NextFieldOffsetInBytes);
+    AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars);
- assert(NextFieldOffsetInBytes == FieldOffsetInBytes &&
+ assert(NextFieldOffsetInChars == FieldOffsetInChars &&
"Did not add enough padding!");
- AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
+ AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
}
// Add the field.
Elements.push_back(InitCst);
- NextFieldOffsetInBytes = AlignedNextFieldOffsetInBytes +
- getSizeInBytes(InitCst);
+ NextFieldOffsetInChars = AlignedNextFieldOffsetInChars +
+ getSizeInChars(InitCst);
if (Packed)
- assert(LLVMStructAlignment == 1 && "Packed struct not byte-aligned!");
+ assert(LLVMStructAlignment == CharUnits::One() &&
+ "Packed struct not byte-aligned!");
else
LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);
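
The rounding rule AppendField applies, spelled out with plain integers
(a sketch; CharUnits is a typed wrapper around this same arithmetic):

  #include <cstdint>
  uint64_t roundUpToAlignment(uint64_t offset, uint64_t align) {
    return (offset + align - 1) / align * align;
  }
  // If the rounded offset overshoots the AST's field offset, the struct is
  // converted to a packed layout (alignment 1); if it undershoots, undef
  // padding bytes are appended until the two offsets agree.
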
@@ -123,17 +131,20 @@ AppendField(const FieldDecl *Field, uint64_t FieldOffset,
void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
uint64_t FieldOffset,
llvm::ConstantInt *CI) {
- if (FieldOffset > NextFieldOffsetInBytes * 8) {
+ const ASTContext &Context = CGM.getContext();
+ const uint64_t CharWidth = Context.getCharWidth();
+ uint64_t NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
+ if (FieldOffset > NextFieldOffsetInBits) {
// We need to add padding.
- uint64_t NumBytes =
- llvm::RoundUpToAlignment(FieldOffset -
- NextFieldOffsetInBytes * 8, 8) / 8;
+ CharUnits PadSize = Context.toCharUnitsFromBits(
+ llvm::RoundUpToAlignment(FieldOffset - NextFieldOffsetInBits,
+ Context.Target.getCharAlign()));
- AppendPadding(NumBytes);
+ AppendPadding(PadSize);
}
uint64_t FieldSize =
- Field->getBitWidth()->EvaluateAsInt(CGM.getContext()).getZExtValue();
+ Field->getBitWidth()->EvaluateAsInt(Context).getZExtValue();
llvm::APInt FieldValue = CI->getValue();
@@ -148,13 +159,13 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
if (FieldSize < FieldValue.getBitWidth())
FieldValue = FieldValue.trunc(FieldSize);
- if (FieldOffset < NextFieldOffsetInBytes * 8) {
+ NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
+ if (FieldOffset < NextFieldOffsetInBits) {
// Either part of the field or the entire field can go into the previous
// byte.
assert(!Elements.empty() && "Elements can't be empty!");
- unsigned BitsInPreviousByte =
- NextFieldOffsetInBytes * 8 - FieldOffset;
+ unsigned BitsInPreviousByte = NextFieldOffsetInBits - FieldOffset;
bool FitsCompletelyInPreviousByte =
BitsInPreviousByte >= FieldValue.getBitWidth();
@@ -179,12 +190,12 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
}
}
- Tmp = Tmp.zext(8);
+ Tmp = Tmp.zext(CharWidth);
if (CGM.getTargetData().isBigEndian()) {
if (FitsCompletelyInPreviousByte)
Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
} else {
- Tmp = Tmp.shl(8 - BitsInPreviousByte);
+ Tmp = Tmp.shl(CharWidth - BitsInPreviousByte);
}
// 'or' in the bits that go into the previous byte.
@@ -203,19 +214,19 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
assert(isa<llvm::ArrayType>(LastElt->getType()) &&
"Expected array padding of undefs");
const llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
- assert(AT->getElementType()->isIntegerTy(8) &&
+ assert(AT->getElementType()->isIntegerTy(CharWidth) &&
AT->getNumElements() != 0 &&
"Expected non-empty array padding of undefs");
// Remove the padding array.
- NextFieldOffsetInBytes -= AT->getNumElements();
+ NextFieldOffsetInChars -= CharUnits::fromQuantity(AT->getNumElements());
Elements.pop_back();
// Add the padding back in two chunks.
- AppendPadding(AT->getNumElements()-1);
- AppendPadding(1);
+ AppendPadding(CharUnits::fromQuantity(AT->getNumElements()-1));
+ AppendPadding(CharUnits::One());
assert(isa<llvm::UndefValue>(Elements.back()) &&
- Elements.back()->getType()->isIntegerTy(8) &&
+ Elements.back()->getType()->isIntegerTy(CharWidth) &&
"Padding addition didn't work right");
}
}
@@ -226,105 +237,104 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
return;
}
- while (FieldValue.getBitWidth() > 8) {
+ while (FieldValue.getBitWidth() > CharWidth) {
llvm::APInt Tmp;
if (CGM.getTargetData().isBigEndian()) {
// We want the high bits.
- Tmp = FieldValue.lshr(FieldValue.getBitWidth() - 8).trunc(8);
+ Tmp =
+ FieldValue.lshr(FieldValue.getBitWidth() - CharWidth).trunc(CharWidth);
} else {
// We want the low bits.
- Tmp = FieldValue.trunc(8);
+ Tmp = FieldValue.trunc(CharWidth);
- FieldValue = FieldValue.lshr(8);
+ FieldValue = FieldValue.lshr(CharWidth);
}
Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
- NextFieldOffsetInBytes++;
+ ++NextFieldOffsetInChars;
- FieldValue = FieldValue.trunc(FieldValue.getBitWidth() - 8);
+ FieldValue = FieldValue.trunc(FieldValue.getBitWidth() - CharWidth);
}
assert(FieldValue.getBitWidth() > 0 &&
"Should have at least one bit left!");
- assert(FieldValue.getBitWidth() <= 8 &&
+ assert(FieldValue.getBitWidth() <= CharWidth &&
"Should not have more than a byte left!");
- if (FieldValue.getBitWidth() < 8) {
+ if (FieldValue.getBitWidth() < CharWidth) {
if (CGM.getTargetData().isBigEndian()) {
unsigned BitWidth = FieldValue.getBitWidth();
- FieldValue = FieldValue.zext(8) << (8 - BitWidth);
+ FieldValue = FieldValue.zext(CharWidth) << (CharWidth - BitWidth);
} else
- FieldValue = FieldValue.zext(8);
+ FieldValue = FieldValue.zext(CharWidth);
}
// Append the last element.
Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
FieldValue));
- NextFieldOffsetInBytes++;
+ ++NextFieldOffsetInChars;
}
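
The byte-splitting loop above for a little-endian target, reduced to plain
integers (a sketch; assumes CharWidth == 8):

  #include <cstdint>
  #include <vector>
  void appendBits(std::vector<uint8_t> &bytes, uint64_t value, unsigned width) {
    while (width > 8) {                       // low byte is emitted first
      bytes.push_back(uint8_t(value & 0xff));
      value >>= 8;
      width -= 8;
    }
    bytes.push_back(uint8_t(value));          // final, possibly partial byte
  }
  // Big-endian targets mirror this: the high byte is peeled off first, and a
  // final partial byte is shifted up to the top of the char.
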
-void ConstStructBuilder::AppendPadding(uint64_t NumBytes) {
- if (!NumBytes)
+void ConstStructBuilder::AppendPadding(CharUnits PadSize) {
+ if (PadSize.isZero())
return;
const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
- if (NumBytes > 1)
- Ty = llvm::ArrayType::get(Ty, NumBytes);
+ if (PadSize > CharUnits::One())
+ Ty = llvm::ArrayType::get(Ty, PadSize.getQuantity());
llvm::Constant *C = llvm::UndefValue::get(Ty);
Elements.push_back(C);
- assert(getAlignment(C) == 1 && "Padding must have 1 byte alignment!");
+ assert(getAlignment(C) == CharUnits::One() &&
+ "Padding must have 1 byte alignment!");
- NextFieldOffsetInBytes += getSizeInBytes(C);
+ NextFieldOffsetInChars += getSizeInChars(C);
}
-void ConstStructBuilder::AppendTailPadding(uint64_t RecordSize) {
- assert(RecordSize % 8 == 0 && "Invalid record size!");
+void ConstStructBuilder::AppendTailPadding(CharUnits RecordSize) {
+ assert(NextFieldOffsetInChars <= RecordSize &&
+ "Size mismatch!");
- uint64_t RecordSizeInBytes = RecordSize / 8;
- assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");
-
- unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
- AppendPadding(NumPadBytes);
+ AppendPadding(RecordSize - NextFieldOffsetInChars);
}
void ConstStructBuilder::ConvertStructToPacked() {
std::vector<llvm::Constant *> PackedElements;
- uint64_t ElementOffsetInBytes = 0;
+ CharUnits ElementOffsetInChars = CharUnits::Zero();
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
llvm::Constant *C = Elements[i];
- unsigned ElementAlign =
- CGM.getTargetData().getABITypeAlignment(C->getType());
- uint64_t AlignedElementOffsetInBytes =
- llvm::RoundUpToAlignment(ElementOffsetInBytes, ElementAlign);
+ CharUnits ElementAlign = CharUnits::fromQuantity(
+ CGM.getTargetData().getABITypeAlignment(C->getType()));
+ CharUnits AlignedElementOffsetInChars =
+ ElementOffsetInChars.RoundUpToAlignment(ElementAlign);
- if (AlignedElementOffsetInBytes > ElementOffsetInBytes) {
+ if (AlignedElementOffsetInChars > ElementOffsetInChars) {
// We need some padding.
- uint64_t NumBytes =
- AlignedElementOffsetInBytes - ElementOffsetInBytes;
+ CharUnits NumChars =
+ AlignedElementOffsetInChars - ElementOffsetInChars;
const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
- if (NumBytes > 1)
- Ty = llvm::ArrayType::get(Ty, NumBytes);
+ if (NumChars > CharUnits::One())
+ Ty = llvm::ArrayType::get(Ty, NumChars.getQuantity());
llvm::Constant *Padding = llvm::UndefValue::get(Ty);
PackedElements.push_back(Padding);
- ElementOffsetInBytes += getSizeInBytes(Padding);
+ ElementOffsetInChars += getSizeInChars(Padding);
}
PackedElements.push_back(C);
- ElementOffsetInBytes += getSizeInBytes(C);
+ ElementOffsetInChars += getSizeInChars(C);
}
- assert(ElementOffsetInBytes == NextFieldOffsetInBytes &&
+ assert(ElementOffsetInChars == NextFieldOffsetInChars &&
"Packing the struct changed its size!");
Elements = PackedElements;
- LLVMStructAlignment = 1;
+ LLVMStructAlignment = CharUnits::One();
Packed = true;
}
@@ -334,16 +344,31 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
unsigned FieldNo = 0;
unsigned ElementNo = 0;
+ const FieldDecl *LastFD = 0;
+ bool IsMsStruct = RD->hasAttr<MsStructAttr>();
+
for (RecordDecl::field_iterator Field = RD->field_begin(),
FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are
+ // ignored:
+ if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((*Field), LastFD) ||
+ CGM.getContext().ZeroBitfieldFollowsBitfield((*Field), LastFD)) {
+ --FieldNo;
+ continue;
+ }
+ LastFD = (*Field);
+ }
// If this is a union, skip all the fields that aren't being initialized.
if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
continue;
// Don't emit anonymous bitfields, they just affect layout.
- if (Field->isBitField() && !Field->getIdentifier())
+ if (Field->isBitField() && !Field->getIdentifier()) {
+ LastFD = (*Field);
continue;
+ }
// Get the initializer. A struct can include fields without initializers,
// we just use explicit null values for them.
@@ -368,9 +393,9 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
}
}
- uint64_t LayoutSizeInBytes = Layout.getSize().getQuantity();
+ CharUnits LayoutSizeInChars = Layout.getSize();
- if (NextFieldOffsetInBytes > LayoutSizeInBytes) {
+ if (NextFieldOffsetInChars > LayoutSizeInChars) {
// If the struct is bigger than the size of the record type,
// we must have a flexible array member at the end.
assert(RD->hasFlexibleArrayMember() &&
@@ -380,23 +405,23 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
return true;
}
- uint64_t LLVMSizeInBytes = llvm::RoundUpToAlignment(NextFieldOffsetInBytes,
- LLVMStructAlignment);
+ CharUnits LLVMSizeInChars =
+ NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
// Check if we need to convert the struct to a packed struct.
- if (NextFieldOffsetInBytes <= LayoutSizeInBytes &&
- LLVMSizeInBytes > LayoutSizeInBytes) {
+ if (NextFieldOffsetInChars <= LayoutSizeInChars &&
+ LLVMSizeInChars > LayoutSizeInChars) {
assert(!Packed && "Size mismatch!");
ConvertStructToPacked();
- assert(NextFieldOffsetInBytes <= LayoutSizeInBytes &&
+ assert(NextFieldOffsetInChars <= LayoutSizeInChars &&
"Converting to packed did not help!");
}
// Append tail padding if necessary.
- AppendTailPadding(CGM.getContext().toBits(Layout.getSize()));
+ AppendTailPadding(LayoutSizeInChars);
- assert(Layout.getSize().getQuantity() == NextFieldOffsetInBytes &&
+ assert(LayoutSizeInChars == NextFieldOffsetInChars &&
"Tail padding mismatch!");
return true;
@@ -413,9 +438,9 @@ llvm::Constant *ConstStructBuilder::
llvm::ConstantStruct::get(CGM.getLLVMContext(),
Builder.Elements, Builder.Packed);
- assert(llvm::RoundUpToAlignment(Builder.NextFieldOffsetInBytes,
- Builder.getAlignment(Result)) ==
- Builder.getSizeInBytes(Result) && "Size mismatch!");
+ assert(Builder.NextFieldOffsetInChars.RoundUpToAlignment(
+ Builder.getAlignment(Result)) ==
+ Builder.getSizeInChars(Result) && "Size mismatch!");
return Result;
}
@@ -447,6 +472,10 @@ public:
return Visit(PE->getSubExpr());
}
+ llvm::Constant *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
+ return Visit(GE->getResultExpr());
+ }
+
llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
return Visit(E->getInitializer());
}
@@ -480,18 +509,17 @@ public:
}
llvm::Constant *VisitCastExpr(CastExpr* E) {
+ Expr *subExpr = E->getSubExpr();
+ llvm::Constant *C = CGM.EmitConstantExpr(subExpr, subExpr->getType(), CGF);
+ if (!C) return 0;
+
+ const llvm::Type *destType = ConvertType(E->getType());
+
switch (E->getCastKind()) {
case CK_ToUnion: {
// GCC cast to union extension
assert(E->getType()->isUnionType() &&
"Destination type is not union type!");
- const llvm::Type *Ty = ConvertType(E->getType());
- Expr *SubExpr = E->getSubExpr();
-
- llvm::Constant *C =
- CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
- if (!C)
- return 0;
// Build a struct with the union sub-element as the first member,
// and padded to the appropriate size
@@ -500,7 +528,7 @@ public:
Elts.push_back(C);
Types.push_back(C->getType());
unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
- unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty);
+ unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(destType);
assert(CurSize <= TotalSize && "Union size mismatch!");
if (unsigned NumPadBytes = TotalSize - CurSize) {
@@ -520,40 +548,103 @@ public:
const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
return CGM.getCXXABI().EmitNullMemberPointer(MPT);
}
-
- case CK_BaseToDerivedMemberPointer: {
- Expr *SubExpr = E->getSubExpr();
- llvm::Constant *C =
- CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
- if (!C) return 0;
+ case CK_DerivedToBaseMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
return CGM.getCXXABI().EmitMemberPointerConversion(C, E);
- }
- case CK_BitCast:
- // This must be a member function pointer cast.
- return Visit(E->getSubExpr());
-
- default: {
- // FIXME: This should be handled by the CK_NoOp cast kind.
- // Explicit and implicit no-op casts
- QualType Ty = E->getType(), SubTy = E->getSubExpr()->getType();
- if (CGM.getContext().hasSameUnqualifiedType(Ty, SubTy))
- return Visit(E->getSubExpr());
-
- // Handle integer->integer casts for address-of-label differences.
- if (Ty->isIntegerType() && SubTy->isIntegerType() &&
- CGF) {
- llvm::Value *Src = Visit(E->getSubExpr());
- if (Src == 0) return 0;
-
- // Use EmitScalarConversion to perform the conversion.
- return cast<llvm::Constant>(CGF->EmitScalarConversion(Src, SubTy, Ty));
- }
-
+ case CK_LValueToRValue:
+ case CK_NoOp:
+ return C;
+
+ case CK_AnyPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_LValueBitCast:
+ case CK_BitCast:
+ if (C->getType() == destType) return C;
+ return llvm::ConstantExpr::getBitCast(C, destType);
+
+ case CK_Dependent: llvm_unreachable("saw dependent cast!");
+
+ // These will never be supported.
+ case CK_ObjCObjectLValueCast:
+ case CK_GetObjCProperty:
+ case CK_ToVoid:
+ case CK_Dynamic:
return 0;
+
+ // These might need to be supported for constexpr.
+ case CK_UserDefinedConversion:
+ case CK_ConstructorConversion:
+ return 0;
+
+ // These should eventually be supported.
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_MemberPointerToBoolean:
+ case CK_VectorSplat:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ return 0;
+
+ case CK_PointerToIntegral:
+ if (!E->getType()->isBooleanType())
+ return llvm::ConstantExpr::getPtrToInt(C, destType);
+ // fallthrough
+
+ case CK_PointerToBoolean:
+    return llvm::ConstantExpr::getICmp(llvm::CmpInst::ICMP_NE, C,
+ llvm::ConstantPointerNull::get(cast<llvm::PointerType>(C->getType())));
+
+ case CK_NullToPointer:
+ return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destType));
+
+ case CK_IntegralCast: {
+ bool isSigned = subExpr->getType()->isSignedIntegerType();
+ return llvm::ConstantExpr::getIntegerCast(C, destType, isSigned);
+ }
+
+ case CK_IntegralToPointer: {
+ bool isSigned = subExpr->getType()->isSignedIntegerType();
+ C = llvm::ConstantExpr::getIntegerCast(C, CGM.IntPtrTy, isSigned);
+ return llvm::ConstantExpr::getIntToPtr(C, destType);
}
+
+ case CK_IntegralToBoolean:
+    return llvm::ConstantExpr::getICmp(llvm::CmpInst::ICMP_NE, C,
+ llvm::Constant::getNullValue(C->getType()));
+
+ case CK_IntegralToFloating:
+ if (subExpr->getType()->isSignedIntegerType())
+ return llvm::ConstantExpr::getSIToFP(C, destType);
+ else
+ return llvm::ConstantExpr::getUIToFP(C, destType);
+
+ case CK_FloatingToIntegral:
+ if (E->getType()->isSignedIntegerType())
+ return llvm::ConstantExpr::getFPToSI(C, destType);
+ else
+ return llvm::ConstantExpr::getFPToUI(C, destType);
+
+ case CK_FloatingToBoolean:
+ return llvm::ConstantExpr::getFCmp(llvm::CmpInst::FCMP_UNE, C,
+ llvm::Constant::getNullValue(C->getType()));
+
+ case CK_FloatingCast:
+ return llvm::ConstantExpr::getFPCast(C, destType);
}
+ llvm_unreachable("Invalid CastKind");
}
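
These folds matter mostly for initializers that reach the visitor without a
CodeGenFunction. An illustrative (hypothetical, not part of the patch) set of
C++ globals and the constant expressions the switch above would produce:

    int   i = 7;
    float f = 7;          // CK_IntegralToFloating -> getSIToFP / getUIToFP
    long  l = (long)&i;   // CK_PointerToIntegral  -> getPtrToInt
    bool  b = (bool)1.5;  // CK_FloatingToBoolean  -> constant fcmp une
    char *p = 0;          // CK_NullToPointer      -> ConstantPointerNull
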
llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
@@ -562,7 +653,7 @@ public:
llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
unsigned NumInitElements = ILE->getNumInits();
- if (NumInitElements == 1 &&
+ if (NumInitElements == 1 && ILE->getType() == ILE->getInit(0)->getType() &&
(isa<StringLiteral>(ILE->getInit(0)) ||
isa<ObjCEncodeExpr>(ILE->getInit(0))))
return Visit(ILE->getInit(0));
@@ -591,8 +682,16 @@ public:
// Initialize remaining array elements.
// FIXME: This doesn't handle member pointers correctly!
+ llvm::Constant *fillC;
+ if (Expr *filler = ILE->getArrayFiller())
+ fillC = CGM.EmitConstantExpr(filler, filler->getType(), CGF);
+ else
+ fillC = llvm::Constant::getNullValue(ElemTy);
+ if (!fillC)
+ return 0;
+ RewriteType |= (fillC->getType() != ElemTy);
for (; i < NumElements; ++i)
- Elts.push_back(llvm::Constant::getNullValue(ElemTy));
+ Elts.push_back(fillC);
if (RewriteType) {
// FIXME: Try to avoid packing the array
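
The filler path above covers initializer lists that leave trailing elements
implicit; a hedged illustration (hypothetical code, not from the patch):

    // Elements 2 and 3 come from the array filler; with no explicit filler
    // expression, fillC is the zero value of ElemTy.
    int arr[4] = { 1, 2 };   // Elts = { 1, 2, fillC, fillC }
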
@@ -730,7 +829,7 @@ public:
E->getType().isConstant(CGM.getContext()),
llvm::GlobalValue::InternalLinkage,
C, ".compoundliteral", 0, false,
- E->getType().getAddressSpace());
+ CGM.getContext().getTargetAddressSpace(E->getType()));
return C;
}
case Expr::DeclRefExprClass: {
@@ -907,12 +1006,29 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
llvm::SmallVector<llvm::Constant *, 4> Inits;
unsigned NumElts = Result.Val.getVectorLength();
- for (unsigned i = 0; i != NumElts; ++i) {
- APValue &Elt = Result.Val.getVectorElt(i);
- if (Elt.isInt())
- Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
- else
- Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
+ if (Context.getLangOptions().AltiVec &&
+ isa<CastExpr>(E) &&
+ cast<CastExpr>(E)->getCastKind() == CK_VectorSplat) {
+ // AltiVec vector initialization with a single literal
+ APValue &Elt = Result.Val.getVectorElt(0);
+
+ llvm::Constant* InitValue = Elt.isInt()
+ ? cast<llvm::Constant>
+ (llvm::ConstantInt::get(VMContext, Elt.getInt()))
+ : cast<llvm::Constant>
+ (llvm::ConstantFP::get(VMContext, Elt.getFloat()));
+
+ for (unsigned i = 0; i != NumElts; ++i)
+ Inits.push_back(InitValue);
+
+ } else {
+ for (unsigned i = 0; i != NumElts; ++i) {
+ APValue &Elt = Result.Val.getVectorElt(i);
+ if (Elt.isInt())
+ Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
+ else
+ Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
+ }
}
return llvm::ConstantVector::get(Inits);
}
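
A minimal sketch of the splat special case, assuming one already-evaluated
APValue element and the usual LLVM/Clang headers (hypothetical helper, not the
patch's code):

    // Replicate a single evaluated element across all vector lanes.
    llvm::Constant *splatElt(llvm::LLVMContext &Ctx, const clang::APValue &Elt,
                             unsigned NumElts) {
      llvm::Constant *V = Elt.isInt()
        ? static_cast<llvm::Constant*>(llvm::ConstantInt::get(Ctx, Elt.getInt()))
        : static_cast<llvm::Constant*>(llvm::ConstantFP::get(Ctx, Elt.getFloat()));
      return llvm::ConstantVector::get(std::vector<llvm::Constant*>(NumElts, V));
    }
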
@@ -963,7 +1079,8 @@ static void
FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
std::vector<llvm::Constant *> &Elements,
uint64_t StartOffset) {
- assert(StartOffset % 8 == 0 && "StartOffset not byte aligned!");
+ assert(StartOffset % CGM.getContext().getCharWidth() == 0 &&
+ "StartOffset not byte aligned!");
if (CGM.getTypes().isZeroInitializable(T))
return;
@@ -1022,8 +1139,8 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
"Should only see pointers to data members here!");
- uint64_t StartIndex = StartOffset / 8;
- uint64_t EndIndex = StartIndex + CGM.getContext().getTypeSize(T) / 8;
+ CharUnits StartIndex = CGM.getContext().toCharUnitsFromBits(StartOffset);
+ CharUnits EndIndex = StartIndex + CGM.getContext().getTypeSizeInChars(T);
// FIXME: hardcodes Itanium member pointer representation!
llvm::Constant *NegativeOne =
@@ -1031,8 +1148,8 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
-1ULL, /*isSigned*/true);
// Fill in the null data member pointer.
- for (uint64_t I = StartIndex; I != EndIndex; ++I)
- Elements[I] = NegativeOne;
+ for (CharUnits I = StartIndex; I != EndIndex; ++I)
+ Elements[I.getQuantity()] = NegativeOne;
}
}
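
The conversions above replace hardcoded divide-by-8 with CharUnits math; a
minimal sketch of the equivalence, assuming an ASTContext named Ctx:

    // Byte index from a bit offset, without assuming 8-bit bytes.
    CharUnits bitsToChars(const clang::ASTContext &Ctx, uint64_t Bits) {
      return Ctx.toCharUnitsFromBits(Bits);  // i.e. Bits / Ctx.getCharWidth()
    }
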
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 3e1debd..6bcc425 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -140,9 +140,7 @@ public:
}
}
- const llvm::IntegerType *Ty = cast<llvm::IntegerType>(V->getType());
- Value *Zero = llvm::ConstantInt::get(Ty, 0);
- return Builder.CreateICmpNE(V, Zero, "tobool");
+ return Builder.CreateIsNotNull(V, "tobool");
}
//===--------------------------------------------------------------------===//
@@ -163,10 +161,13 @@ public:
Value *VisitParenExpr(ParenExpr *PE) {
return Visit(PE->getSubExpr());
}
+ Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
+ return Visit(GE->getResultExpr());
+ }
// Leaves.
Value *VisitIntegerLiteral(const IntegerLiteral *E) {
- return llvm::ConstantInt::get(VMContext, E->getValue());
+ return Builder.getInt(E->getValue());
}
Value *VisitFloatingLiteral(const FloatingLiteral *E) {
return llvm::ConstantFP::get(VMContext, E->getValue());
@@ -184,15 +185,14 @@ public:
return EmitNullValue(E->getType());
}
Value *VisitOffsetOfExpr(OffsetOfExpr *E);
- Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E);
+ Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
return Builder.CreateBitCast(V, ConvertType(E->getType()));
}
Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
- return llvm::ConstantInt::get(ConvertType(E->getType()),
- E->getPackLength());
+ return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
}
Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
@@ -212,13 +212,12 @@ public:
assert(!Result.HasSideEffects && "Constant declref with side-effect?!");
llvm::Constant *C;
- if (Result.Val.isInt()) {
- C = llvm::ConstantInt::get(VMContext, Result.Val.getInt());
- } else if (Result.Val.isFloat()) {
+ if (Result.Val.isInt())
+ C = Builder.getInt(Result.Val.getInt());
+ else if (Result.Val.isFloat())
C = llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
- } else {
+ else
return EmitLoadOfLValue(E);
- }
// Make sure we emit a debug reference to the global variable.
if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl())) {
@@ -245,6 +244,9 @@ public:
return EmitLoadOfLValue(E);
}
Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ if (E->getMethodDecl() &&
+ E->getMethodDecl()->getResultType()->isReferenceType())
+ return EmitLoadOfLValue(E);
return CGF.EmitObjCMessageExpr(E).getScalarVal();
}
@@ -358,13 +360,21 @@ public:
return 0;
}
Value *VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *E) {
- return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
+ return Builder.getInt1(E->getValue());
}
Value *VisitBinaryTypeTraitExpr(const BinaryTypeTraitExpr *E) {
return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
}
+ Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
+ return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
+ }
+
+ Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
+ return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
+ }
+
Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
// C++ [expr.pseudo]p1:
// The result shall only be used as the operand for the function call
@@ -385,7 +395,7 @@ public:
}
Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
- return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
+ return Builder.getInt1(E->getValue());
}
// Binary Operators.
@@ -579,14 +589,14 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
// Insert the element in element zero of an undef vector
llvm::Value *UnV = llvm::UndefValue::get(DstTy);
- llvm::Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, 0);
+ llvm::Value *Idx = Builder.getInt32(0);
UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
// Splat the element across to all elements
llvm::SmallVector<llvm::Constant*, 16> Args;
unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
for (unsigned i = 0; i != NumElements; ++i)
- Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 0));
+ Args.push_back(Builder.getInt32(0));
llvm::Constant *Mask = llvm::ConstantVector::get(Args);
llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
@@ -683,8 +693,8 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
// Shuffle LHS & RHS into one input vector.
llvm::SmallVector<llvm::Constant*, 32> concat;
for (unsigned i = 0; i != LHSElts; ++i) {
- concat.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 2*i));
- concat.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 2*i+1));
+ concat.push_back(Builder.getInt32(2*i));
+ concat.push_back(Builder.getInt32(2*i+1));
}
Value* CV = llvm::ConstantVector::get(concat);
@@ -726,18 +736,16 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
MTy->getNumElements());
Value* NewV = llvm::UndefValue::get(RTy);
for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
- Value *Indx = llvm::ConstantInt::get(CGF.Int32Ty, i);
+ Value *Indx = Builder.getInt32(i);
Indx = Builder.CreateExtractElement(Mask, Indx, "shuf_idx");
Indx = Builder.CreateZExt(Indx, CGF.Int32Ty, "idx_zext");
// Handle vec3 special since the index will be off by one for the RHS.
if ((LHSElts == 6) && (E->getNumSubExprs() == 3)) {
Value *cmpIndx, *newIndx;
- cmpIndx = Builder.CreateICmpUGT(Indx,
- llvm::ConstantInt::get(CGF.Int32Ty, 3),
+ cmpIndx = Builder.CreateICmpUGT(Indx, Builder.getInt32(3),
"cmp_shuf_idx");
- newIndx = Builder.CreateSub(Indx, llvm::ConstantInt::get(CGF.Int32Ty,1),
- "shuf_idx_adj");
+ newIndx = Builder.CreateSub(Indx, Builder.getInt32(1), "shuf_idx_adj");
Indx = Builder.CreateSelect(cmpIndx, newIndx, Indx, "sel_shuf_idx");
}
Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
@@ -775,7 +783,7 @@ Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
CGF.EmitScalarExpr(E->getBase());
else
EmitLValue(E->getBase());
- return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
+ return Builder.getInt(Result.Val.getInt());
}
// Emit debug info for aggregate now, if it was delayed to reduce
@@ -875,8 +883,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
for (unsigned j = 0; j != CurIdx; ++j)
Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
- Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
- ResElts + C->getZExtValue()));
+ Args.push_back(Builder.getInt32(ResElts + C->getZExtValue()));
for (unsigned j = CurIdx + 1; j != ResElts; ++j)
Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
@@ -892,8 +899,8 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
}
}
}
- Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, CurIdx);
- V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
+ V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
+ "vecinit");
VIsUndefShuffle = false;
++CurIdx;
continue;
@@ -918,7 +925,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
CGF.Int32Ty));
} else {
- Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
+ Args.push_back(Builder.getInt32(j));
}
}
for (unsigned j = 0, je = InitElts; j != je; ++j)
@@ -937,7 +944,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// to the vector initializer into V.
if (Args.empty()) {
for (unsigned j = 0; j != InitElts; ++j)
- Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
+ Args.push_back(Builder.getInt32(j));
for (unsigned j = InitElts; j != ResElts; ++j)
Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
llvm::Constant *Mask = llvm::ConstantVector::get(Args);
@@ -946,9 +953,9 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
Args.clear();
for (unsigned j = 0; j != CurIdx; ++j)
- Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
+ Args.push_back(Builder.getInt32(j));
for (unsigned j = 0; j != InitElts; ++j)
- Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j+Offset));
+ Args.push_back(Builder.getInt32(j+Offset));
for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
}
@@ -969,7 +976,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// Emit remaining default initializers
for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
- Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, CurIdx);
+ Value *Idx = Builder.getInt32(CurIdx);
llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
}
@@ -1123,7 +1130,7 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
RValue RV = CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getType());
return RV.getScalarVal();
}
-
+
case CK_LValueToRValue:
assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
@@ -1160,13 +1167,13 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
// Insert the element in element zero of an undef vector
llvm::Value *UnV = llvm::UndefValue::get(DstTy);
- llvm::Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, 0);
+ llvm::Value *Idx = Builder.getInt32(0);
UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
// Splat the element across to all elements
llvm::SmallVector<llvm::Constant*, 16> Args;
unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
- llvm::Constant *Zero = llvm::ConstantInt::get(CGF.Int32Ty, 0);
+ llvm::Constant *Zero = Builder.getInt32(0);
for (unsigned i = 0; i < NumElements; i++)
Args.push_back(Zero);
@@ -1218,10 +1225,8 @@ Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
}
Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
- llvm::Value *V = CGF.GetAddrOfBlockDecl(E);
- if (E->getType().isObjCGCWeak())
- return CGF.CGM.getObjCRuntime().EmitObjCWeakRead(CGF, V);
- return CGF.EmitLoadOfScalar(V, false, 0, E->getType());
+ LValue LV = CGF.EmitBlockDeclRefLValue(E);
+ return CGF.EmitLoadOfLValue(LV, E->getType()).getScalarVal();
}
//===----------------------------------------------------------------------===//
@@ -1278,10 +1283,12 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
- if (type->isSignedIntegerType())
+ // Note that signed integer inc/dec with width less than int can't
+ // overflow because of promotion rules; we're just eliding a few steps here.
+ if (type->isSignedIntegerType() &&
+ value->getType()->getPrimitiveSizeInBits() >=
+ CGF.CGM.IntTy->getBitWidth())
value = EmitAddConsiderOverflowBehavior(E, value, amt, isInc);
-
- // Unsigned integer inc is always two's complement.
else
value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
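
A short worked example of the promotion argument in the comment above
(illustrative only):

    short s = 32767;
    ++s;  // evaluated as (int)s + 1: the add happens at int width, where
          // 32767 + 1 cannot overflow, so no overflow check is needed for
          // sub-int signed types; only the store back truncates.
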
@@ -1295,21 +1302,30 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
CGF.GetVLASize(CGF.getContext().getAsVariableArrayType(type));
value = CGF.EmitCastToVoidPtr(value);
if (!isInc) vlaSize = Builder.CreateNSWNeg(vlaSize, "vla.negsize");
- value = Builder.CreateInBoundsGEP(value, vlaSize, "vla.inc");
+ if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
+ value = Builder.CreateGEP(value, vlaSize, "vla.inc");
+ else
+ value = Builder.CreateInBoundsGEP(value, vlaSize, "vla.inc");
value = Builder.CreateBitCast(value, input->getType());
// Arithmetic on function pointers (!) is just +-1.
} else if (type->isFunctionType()) {
- llvm::Value *amt = llvm::ConstantInt::get(CGF.Int32Ty, amount);
+ llvm::Value *amt = Builder.getInt32(amount);
value = CGF.EmitCastToVoidPtr(value);
- value = Builder.CreateInBoundsGEP(value, amt, "incdec.funcptr");
+ if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
+ value = Builder.CreateGEP(value, amt, "incdec.funcptr");
+ else
+ value = Builder.CreateInBoundsGEP(value, amt, "incdec.funcptr");
value = Builder.CreateBitCast(value, input->getType());
// For everything else, we can just do a simple increment.
} else {
- llvm::Value *amt = llvm::ConstantInt::get(CGF.Int32Ty, amount);
- value = Builder.CreateInBoundsGEP(value, amt, "incdec.ptr");
+ llvm::Value *amt = Builder.getInt32(amount);
+ if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
+ value = Builder.CreateGEP(value, amt, "incdec.ptr");
+ else
+ value = Builder.CreateInBoundsGEP(value, amt, "incdec.ptr");
}
// Vector increment/decrement.
@@ -1357,7 +1373,10 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *sizeValue =
llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
- value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
+ if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
+ value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
+ else
+ value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
value = Builder.CreateBitCast(value, input->getType());
}
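
All three branches above repeat one pattern; a hedged sketch of it as a
stand-alone helper (hypothetical, the patch inlines it at each site):

    // Pick the GEP flavor from the dialect: under -fwrapv (signed overflow
    // defined) the 'inbounds' claim must be dropped.
    static llvm::Value *emitPointerStep(CodeGenFunction &CGF,
                                        llvm::Value *Ptr, llvm::Value *Amt,
                                        const llvm::Twine &Name) {
      if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
        return CGF.Builder.CreateGEP(Ptr, Amt, Name);
      return CGF.Builder.CreateInBoundsGEP(Ptr, Amt, Name);
    }
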
@@ -1413,7 +1432,7 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
// Try folding the offsetof to a constant.
Expr::EvalResult EvalResult;
if (E->Evaluate(EvalResult, CGF.getContext()))
- return llvm::ConstantInt::get(VMContext, EvalResult.Val.getInt());
+ return Builder.getInt(EvalResult.Val.getInt());
// Loop over the components of the offsetof to compute the value.
unsigned n = E->getNumComponents();
@@ -1499,12 +1518,13 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
return Result;
}
-/// VisitSizeOfAlignOfExpr - Return the size or alignment of the type of
+/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
/// argument of the sizeof expression as an integer.
Value *
-ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
+ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
+ const UnaryExprOrTypeTraitExpr *E) {
QualType TypeToSize = E->getTypeOfArgument();
- if (E->isSizeOf()) {
+ if (E->getKind() == UETT_SizeOf) {
if (const VariableArrayType *VAT =
CGF.getContext().getAsVariableArrayType(TypeToSize)) {
if (E->isArgumentType()) {
@@ -1524,7 +1544,7 @@ ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
// folding logic so we don't have to duplicate it here.
Expr::EvalResult Result;
E->Evaluate(Result, CGF.getContext());
- return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
+ return Builder.getInt(Result.Val.getInt());
}
Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
@@ -1664,8 +1684,7 @@ void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
if (Ops.Ty->hasSignedIntegerRepresentation()) {
llvm::Value *IntMin =
- llvm::ConstantInt::get(VMContext,
- llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
+ Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);
llvm::Value *Cond1 = Builder.CreateICmpEQ(Ops.RHS, Zero);
@@ -1715,7 +1734,7 @@ Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
}
- if (Ops.Ty->isUnsignedIntegerType())
+ if (Ops.Ty->hasUnsignedIntegerRepresentation())
return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
else
return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
@@ -1801,8 +1820,7 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
Builder.CreateBr(continueBB);
Builder.SetInsertPoint(continueBB);
- llvm::PHINode *phi = Builder.CreatePHI(opTy);
- phi->reserveOperandSpace(2);
+ llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
phi->addIncoming(result, initialBB);
phi->addIncoming(handlerResult, overflowBB);
@@ -1889,6 +1907,8 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
return Builder.CreateBitCast(Res, Ptr->getType());
}
+ if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
+ return Builder.CreateGEP(Ptr, Idx, "add.ptr");
return Builder.CreateInBoundsGEP(Ptr, Idx, "add.ptr");
}
@@ -1964,38 +1984,39 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
return Builder.CreateBitCast(Res, Ops.LHS->getType());
}
+ if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
+ return Builder.CreateGEP(Ops.LHS, Idx, "sub.ptr");
return Builder.CreateInBoundsGEP(Ops.LHS, Idx, "sub.ptr");
- } else {
- // pointer - pointer
- Value *LHS = Ops.LHS;
- Value *RHS = Ops.RHS;
-
- CharUnits ElementSize;
-
- // Handle GCC extension for pointer arithmetic on void* and function pointer
- // types.
- if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
- ElementSize = CharUnits::One();
- } else {
- ElementSize = CGF.getContext().getTypeSizeInChars(LHSElementType);
- }
-
- const llvm::Type *ResultType = ConvertType(Ops.Ty);
- LHS = Builder.CreatePtrToInt(LHS, ResultType, "sub.ptr.lhs.cast");
- RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
- Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
+ }
+
+ // pointer - pointer
+ Value *LHS = Ops.LHS;
+ Value *RHS = Ops.RHS;
- // Optimize out the shift for element size of 1.
- if (ElementSize.isOne())
- return BytesBetween;
+ CharUnits ElementSize;
- // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
- // pointer difference in C is only defined in the case where both operands
- // are pointing to elements of an array.
- Value *BytesPerElt =
- llvm::ConstantInt::get(ResultType, ElementSize.getQuantity());
- return Builder.CreateExactSDiv(BytesBetween, BytesPerElt, "sub.ptr.div");
- }
+ // Handle GCC extension for pointer arithmetic on void* and function pointer
+ // types.
+ if (LHSElementType->isVoidType() || LHSElementType->isFunctionType())
+ ElementSize = CharUnits::One();
+ else
+ ElementSize = CGF.getContext().getTypeSizeInChars(LHSElementType);
+
+ const llvm::Type *ResultType = ConvertType(Ops.Ty);
+ LHS = Builder.CreatePtrToInt(LHS, ResultType, "sub.ptr.lhs.cast");
+ RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
+ Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
+
+ // Optimize out the shift for element size of 1.
+ if (ElementSize.isOne())
+ return BytesBetween;
+
+ // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
+ // pointer difference in C is only defined in the case where both operands
+ // are pointing to elements of an array.
+ Value *BytesPerElt =
+ llvm::ConstantInt::get(ResultType, ElementSize.getQuantity());
+ return Builder.CreateExactSDiv(BytesBetween, BytesPerElt, "sub.ptr.div");
}
Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
@@ -2100,7 +2121,7 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
// If AltiVec, the comparison results in a numeric type, so we use
// intrinsics comparing vectors and giving 0 or 1 as a result
- if (LHSTy->isVectorType() && CGF.getContext().getLangOptions().AltiVec) {
+ if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
// constants for mapping CR6 register bits to predicate result
enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
@@ -2157,7 +2178,7 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
break;
}
- Value *CR6Param = llvm::ConstantInt::get(CGF.Int32Ty, CR6);
+ Value *CR6Param = Builder.getInt32(CR6);
llvm::Function *F = CGF.CGM.getIntrinsic(ID);
Result = Builder.CreateCall3(F, CR6Param, FirstVecArg, SecondVecArg, "");
return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
@@ -2257,8 +2278,9 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
// If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
// If we have 1 && X, just emit X without inserting the control flow.
- if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
- if (Cond == 1) { // If we have 1 && X, just emit X.
+ bool LHSCondVal;
+ if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
+ if (LHSCondVal) { // If we have 1 && X, just emit X.
Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
// ZExt result to int or bool.
return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
@@ -2280,9 +2302,8 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
// Any edges into the ContBlock are now from an (indeterminate number of)
// edges from this first condition. All of these values will be false. Start
// setting up the PHI node in the Cont Block for this.
- llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext),
+ llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
"", ContBlock);
- PN->reserveOperandSpace(2); // Normal case, two inputs.
for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
PI != PE; ++PI)
PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
@@ -2297,6 +2318,9 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
// Emit an unconditional branch from this block to ContBlock. Insert an entry
// into the phi node for the edge with the value of RHSCond.
+ if (CGF.getDebugInfo())
+ // There is no need to emit line number for unconditional branch.
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc());
CGF.EmitBlock(ContBlock);
PN->addIncoming(RHSCond, RHSBlock);
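
The folding helper's contract changed here: the old API returned an int
tri-state (-1 false, 0 unknown, +1 true), while the new one reports success
and writes the value through an out-parameter. A hedged usage sketch, assuming
a CodeGenFunction CGF and a condition expression Cond:

    bool CondVal;
    if (CGF.ConstantFoldsToSimpleInteger(Cond, CondVal)) {
      // Folded: CondVal holds the truth value and a dead arm can be skipped.
    } else {
      // Not constant: emit real control flow, with a PHI as above.
    }
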
@@ -2309,8 +2333,9 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
// If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
// If we have 0 || X, just emit X without inserting the control flow.
- if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
- if (Cond == -1) { // If we have 0 || X, just emit X.
+ bool LHSCondVal;
+ if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
+ if (!LHSCondVal) { // If we have 0 || X, just emit X.
Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
// ZExt result to int or bool.
return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
@@ -2332,9 +2357,8 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
// Any edges into the ContBlock are now from an (indeterminate number of)
// edges from this first condition. All of these values will be true. Start
// setting up the PHI node in the Cont Block for this.
- llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext),
+ llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
"", ContBlock);
- PN->reserveOperandSpace(2); // Normal case, two inputs.
for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
PI != PE; ++PI)
PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
@@ -2375,12 +2399,10 @@ Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
/// flow into selects in some cases.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
CodeGenFunction &CGF) {
- if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
- return isCheapEnoughToEvaluateUnconditionally(PE->getSubExpr(), CGF);
+ E = E->IgnoreParens();
- // TODO: Allow anything we can constant fold to an integer or fp constant.
- if (isa<IntegerLiteral>(E) || isa<CharacterLiteral>(E) ||
- isa<FloatingLiteral>(E))
+ // Anything that is an integer or floating point constant is fine.
+ if (E->isConstantInitializer(CGF.getContext(), false))
return true;
// Non-volatile automatic variables too, to get "cond ? X : Y" where
@@ -2409,9 +2431,10 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
// If the condition constant folds and can be elided, try to avoid emitting
// the condition and the dead arm.
- if (int Cond = CGF.ConstantFoldsToSimpleInteger(condExpr)){
+ bool CondExprBool;
+ if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
Expr *live = lhsExpr, *dead = rhsExpr;
- if (Cond == -1) std::swap(live, dead);
+ if (!CondExprBool) std::swap(live, dead);
// If the dead side doesn't have labels we need, and if the Live side isn't
// the gnu missing ?: extension (which we could handle, but don't bother
@@ -2436,7 +2459,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
std::vector<llvm::Constant*> Zvals;
for (unsigned i = 0; i < numElem; ++i)
- Zvals.push_back(llvm::ConstantInt::get(elemType,0));
+ Zvals.push_back(llvm::ConstantInt::get(elemType, 0));
llvm::Value *zeroVec = llvm::ConstantVector::get(Zvals);
llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
@@ -2507,8 +2530,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
return LHS;
// Create a PHI node for the real part.
- llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), "cond");
- PN->reserveOperandSpace(2);
+ llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
PN->addIncoming(LHS, LHSBlock);
PN->addIncoming(RHS, RHSBlock);
return PN;
@@ -2544,8 +2566,13 @@ Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
assert(E && !hasAggregateLLVMType(E->getType()) &&
"Invalid scalar expression to emit");
- return ScalarExprEmitter(*this, IgnoreResultAssign)
+ if (isa<CXXDefaultArgExpr>(E))
+ disableDebugInfo();
+ Value *V = ScalarExprEmitter(*this, IgnoreResultAssign)
.Visit(const_cast<Expr*>(E));
+ if (isa<CXXDefaultArgExpr>(E))
+ enableDebugInfo();
+ return V;
}
/// EmitScalarConversion - Emit a conversion from the specified type to the
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index 5d34907..5b0d41e 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -119,28 +119,26 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
/// CodeGenFunction.
void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD) {
- FunctionArgList Args;
+ FunctionArgList args;
// Check if we should generate debug info for this method.
- if (CGM.getDebugInfo() && !OMD->hasAttr<NoDebugAttr>())
- DebugInfo = CGM.getDebugInfo();
+ if (CGM.getModuleDebugInfo() && !OMD->hasAttr<NoDebugAttr>())
+ DebugInfo = CGM.getModuleDebugInfo();
llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);
const CGFunctionInfo &FI = CGM.getTypes().getFunctionInfo(OMD);
CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
- Args.push_back(std::make_pair(OMD->getSelfDecl(),
- OMD->getSelfDecl()->getType()));
- Args.push_back(std::make_pair(OMD->getCmdDecl(),
- OMD->getCmdDecl()->getType()));
+ args.push_back(OMD->getSelfDecl());
+ args.push_back(OMD->getCmdDecl());
for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
E = OMD->param_end(); PI != E; ++PI)
- Args.push_back(std::make_pair(*PI, (*PI)->getType()));
+ args.push_back(*PI);
CurGD = OMD;
- StartFunction(OMD, OMD->getResultType(), Fn, Args, OMD->getLocStart());
+ StartFunction(OMD, OMD->getResultType(), Fn, FI, args, OMD->getLocStart());
}
void CodeGenFunction::GenerateObjCGetterBody(ObjCIvarDecl *Ivar,
@@ -155,27 +153,24 @@ void CodeGenFunction::GenerateObjCGetterBody(ObjCIvarDecl *Ivar,
CallArgList Args;
RValue RV = RValue::get(Builder.CreateBitCast(ReturnValue,
Types.ConvertType(getContext().VoidPtrTy)));
- Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ Args.add(RV, getContext().VoidPtrTy);
RV = RValue::get(Builder.CreateBitCast(LV.getAddress(),
Types.ConvertType(getContext().VoidPtrTy)));
- Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ Args.add(RV, getContext().VoidPtrTy);
// sizeof (Type of Ivar)
CharUnits Size = getContext().getTypeSizeInChars(Ivar->getType());
llvm::Value *SizeVal =
llvm::ConstantInt::get(Types.ConvertType(getContext().LongTy),
Size.getQuantity());
- Args.push_back(std::make_pair(RValue::get(SizeVal),
- getContext().LongTy));
+ Args.add(RValue::get(SizeVal), getContext().LongTy);
llvm::Value *isAtomic =
llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy),
IsAtomic ? 1 : 0);
- Args.push_back(std::make_pair(RValue::get(isAtomic),
- getContext().BoolTy));
+ Args.add(RValue::get(isAtomic), getContext().BoolTy);
llvm::Value *hasStrong =
llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy),
IsStrong ? 1 : 0);
- Args.push_back(std::make_pair(RValue::get(hasStrong),
- getContext().BoolTy));
+ Args.add(RValue::get(hasStrong), getContext().BoolTy);
EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
FunctionType::ExtInfo()),
GetCopyStructFn, ReturnValueSlot(), Args);
@@ -236,10 +231,10 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
llvm::Value *True =
llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
CallArgList Args;
- Args.push_back(std::make_pair(RValue::get(SelfAsId), IdTy));
- Args.push_back(std::make_pair(RValue::get(CmdVal), Cmd->getType()));
- Args.push_back(std::make_pair(RValue::get(Offset), getContext().LongTy));
- Args.push_back(std::make_pair(RValue::get(True), getContext().BoolTy));
+ Args.add(RValue::get(SelfAsId), IdTy);
+ Args.add(RValue::get(CmdVal), Cmd->getType());
+ Args.add(RValue::get(Offset), getContext().getPointerDiffType());
+ Args.add(RValue::get(True), getContext().BoolTy);
// FIXME: We shouldn't need to get the function info here, the
// runtime already should have computed it to build the function.
RValue RV = EmitCall(Types.getFunctionInfo(PD->getType(), Args,
@@ -263,6 +258,22 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
CGM.getObjCRuntime().GetGetStructFunction()) {
GenerateObjCGetterBody(Ivar, true, false);
}
+ else if (IsAtomic &&
+ (IVART->isScalarType() && !IVART->isRealFloatingType()) &&
+ Triple.getArch() == llvm::Triple::x86 &&
+ (getContext().getTypeSizeInChars(IVART)
+ > CharUnits::fromQuantity(4)) &&
+ CGM.getObjCRuntime().GetGetStructFunction()) {
+ GenerateObjCGetterBody(Ivar, true, false);
+ }
+ else if (IsAtomic &&
+ (IVART->isScalarType() && !IVART->isRealFloatingType()) &&
+ Triple.getArch() == llvm::Triple::x86_64 &&
+ (getContext().getTypeSizeInChars(IVART)
+ > CharUnits::fromQuantity(8)) &&
+ CGM.getObjCRuntime().GetGetStructFunction()) {
+ GenerateObjCGetterBody(Ivar, true, false);
+ }
else if (IVART->isAnyComplexType()) {
LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
Ivar, 0);
@@ -272,18 +283,36 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
}
else if (hasAggregateLLVMType(IVART)) {
bool IsStrong = false;
- if ((IsAtomic || (IsStrong = IvarTypeWithAggrGCObjects(IVART)))
+ if ((IsStrong = IvarTypeWithAggrGCObjects(IVART))
&& CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect
&& CGM.getObjCRuntime().GetGetStructFunction()) {
GenerateObjCGetterBody(Ivar, IsAtomic, IsStrong);
}
else {
- if (PID->getGetterCXXConstructor()) {
+ const CXXRecordDecl *classDecl = IVART->getAsCXXRecordDecl();
+
+ if (PID->getGetterCXXConstructor() &&
+ classDecl && !classDecl->hasTrivialConstructor()) {
ReturnStmt *Stmt =
new (getContext()) ReturnStmt(SourceLocation(),
PID->getGetterCXXConstructor(),
0);
EmitReturnStmt(*Stmt);
+ } else if (IsAtomic &&
+ !IVART->isAnyComplexType() &&
+ Triple.getArch() == llvm::Triple::x86 &&
+ (getContext().getTypeSizeInChars(IVART)
+ > CharUnits::fromQuantity(4)) &&
+ CGM.getObjCRuntime().GetGetStructFunction()) {
+ GenerateObjCGetterBody(Ivar, true, false);
+ }
+ else if (IsAtomic &&
+ !IVART->isAnyComplexType() &&
+ Triple.getArch() == llvm::Triple::x86_64 &&
+ (getContext().getTypeSizeInChars(IVART)
+ > CharUnits::fromQuantity(8)) &&
+ CGM.getObjCRuntime().GetGetStructFunction()) {
+ GenerateObjCGetterBody(Ivar, true, false);
}
else {
LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
@@ -295,11 +324,17 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
else {
LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
Ivar, 0);
- CodeGenTypes &Types = CGM.getTypes();
- RValue RV = EmitLoadOfLValue(LV, IVART);
- RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
+ if (PD->getType()->isReferenceType()) {
+ RValue RV = RValue::get(LV.getAddress());
+ EmitReturnOfRValue(RV, PD->getType());
+ }
+ else {
+ CodeGenTypes &Types = CGM.getTypes();
+ RValue RV = EmitLoadOfLValue(LV, IVART);
+ RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
Types.ConvertType(PD->getType())));
- EmitReturnOfRValue(RV, PD->getType());
+ EmitReturnOfRValue(RV, PD->getType());
+ }
}
}
@@ -318,31 +353,42 @@ void CodeGenFunction::GenerateObjCAtomicSetterBody(ObjCMethodDecl *OMD,
RValue RV =
RValue::get(Builder.CreateBitCast(LV.getAddress(),
Types.ConvertType(getContext().VoidPtrTy)));
- Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ Args.add(RV, getContext().VoidPtrTy);
llvm::Value *Arg = LocalDeclMap[*OMD->param_begin()];
llvm::Value *ArgAsPtrTy =
Builder.CreateBitCast(Arg,
Types.ConvertType(getContext().VoidPtrTy));
RV = RValue::get(ArgAsPtrTy);
- Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ Args.add(RV, getContext().VoidPtrTy);
// sizeof (Type of Ivar)
CharUnits Size = getContext().getTypeSizeInChars(Ivar->getType());
llvm::Value *SizeVal =
llvm::ConstantInt::get(Types.ConvertType(getContext().LongTy),
Size.getQuantity());
- Args.push_back(std::make_pair(RValue::get(SizeVal),
- getContext().LongTy));
+ Args.add(RValue::get(SizeVal), getContext().LongTy);
llvm::Value *True =
llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
- Args.push_back(std::make_pair(RValue::get(True), getContext().BoolTy));
+ Args.add(RValue::get(True), getContext().BoolTy);
llvm::Value *False =
llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 0);
- Args.push_back(std::make_pair(RValue::get(False), getContext().BoolTy));
+ Args.add(RValue::get(False), getContext().BoolTy);
EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
FunctionType::ExtInfo()),
GetCopyStructFn, ReturnValueSlot(), Args);
}
+static bool
+IvarAssignHasTrivialAssignment(const ObjCPropertyImplDecl *PID,
+ QualType IvarT) {
+ bool HasTrivialAssignment = true;
+ if (PID->getSetterCXXAssignment()) {
+ const CXXRecordDecl *classDecl = IvarT->getAsCXXRecordDecl();
+ HasTrivialAssignment =
+ (!classDecl || classDecl->hasTrivialCopyAssignment());
+ }
+ return HasTrivialAssignment;
+}
+
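
Illustrative ivar types and what the helper reports (hypothetical names):

    struct Plain { int x; };   // trivial copy-assignment: reported true
    struct Deep {              // user-provided operator=: reported false
      Deep &operator=(const Deep &other);
    };
    // Non-class ivar types have no CXXRecordDecl and also report true.
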
/// GenerateObjCSetter - Generate an Objective-C property setter
/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
/// is illegal within a category.
@@ -353,7 +399,8 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
assert(OMD && "Invalid call to generate setter (empty method)");
StartObjCMethod(OMD, IMP->getClassInterface());
-
+ const llvm::Triple &Triple = getContext().Target.getTriple();
+ QualType IVART = Ivar->getType();
bool IsCopy = PD->getSetterKind() == ObjCPropertyDecl::Copy;
bool IsAtomic =
!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic);
@@ -394,32 +441,34 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
llvm::Value *False =
llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 0);
CallArgList Args;
- Args.push_back(std::make_pair(RValue::get(SelfAsId), IdTy));
- Args.push_back(std::make_pair(RValue::get(CmdVal), Cmd->getType()));
- Args.push_back(std::make_pair(RValue::get(Offset), getContext().LongTy));
- Args.push_back(std::make_pair(RValue::get(ArgAsId), IdTy));
- Args.push_back(std::make_pair(RValue::get(IsAtomic ? True : False),
- getContext().BoolTy));
- Args.push_back(std::make_pair(RValue::get(IsCopy ? True : False),
- getContext().BoolTy));
+ Args.add(RValue::get(SelfAsId), IdTy);
+ Args.add(RValue::get(CmdVal), Cmd->getType());
+ Args.add(RValue::get(Offset), getContext().getPointerDiffType());
+ Args.add(RValue::get(ArgAsId), IdTy);
+ Args.add(RValue::get(IsAtomic ? True : False), getContext().BoolTy);
+ Args.add(RValue::get(IsCopy ? True : False), getContext().BoolTy);
// FIXME: We shouldn't need to get the function info here, the runtime
// already should have computed it to build the function.
EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
FunctionType::ExtInfo()),
SetPropertyFn,
ReturnValueSlot(), Args);
- } else if (IsAtomic && hasAggregateLLVMType(Ivar->getType()) &&
- !Ivar->getType()->isAnyComplexType() &&
- IndirectObjCSetterArg(*CurFnInfo)
+ } else if (IsAtomic && hasAggregateLLVMType(IVART) &&
+ !IVART->isAnyComplexType() &&
+ IvarAssignHasTrivialAssignment(PID, IVART) &&
+ ((Triple.getArch() == llvm::Triple::x86 &&
+ (getContext().getTypeSizeInChars(IVART)
+ > CharUnits::fromQuantity(4))) ||
+ (Triple.getArch() == llvm::Triple::x86_64 &&
+ (getContext().getTypeSizeInChars(IVART)
+ > CharUnits::fromQuantity(8))))
&& CGM.getObjCRuntime().GetSetStructFunction()) {
- // objc_copyStruct (&structIvar, &Arg,
- // sizeof (struct something), true, false);
+ // objc_copyStruct (&structIvar, &Arg,
+ // sizeof (struct something), true, false);
GenerateObjCAtomicSetterBody(OMD, Ivar);
} else if (PID->getSetterCXXAssignment()) {
EmitIgnoredExpr(PID->getSetterCXXAssignment());
} else {
- const llvm::Triple &Triple = getContext().Target.getTriple();
- QualType IVART = Ivar->getType();
if (IsAtomic &&
IVART->isScalarType() &&
(Triple.getArch() == llvm::Triple::arm ||
@@ -429,6 +478,22 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
CGM.getObjCRuntime().GetGetStructFunction()) {
GenerateObjCAtomicSetterBody(OMD, Ivar);
}
+ else if (IsAtomic &&
+ (IVART->isScalarType() && !IVART->isRealFloatingType()) &&
+ Triple.getArch() == llvm::Triple::x86 &&
+ (getContext().getTypeSizeInChars(IVART)
+ > CharUnits::fromQuantity(4)) &&
+ CGM.getObjCRuntime().GetGetStructFunction()) {
+ GenerateObjCAtomicSetterBody(OMD, Ivar);
+ }
+ else if (IsAtomic &&
+ (IVART->isScalarType() && !IVART->isRealFloatingType()) &&
+ Triple.getArch() == llvm::Triple::x86_64 &&
+ (getContext().getTypeSizeInChars(IVART)
+ > CharUnits::fromQuantity(8)) &&
+ CGM.getObjCRuntime().GetGetStructFunction()) {
+ GenerateObjCAtomicSetterBody(OMD, Ivar);
+ }
else {
// FIXME: Find a clean way to avoid AST node creation.
SourceLocation Loc = PD->getLocation();
@@ -436,7 +501,10 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
DeclRefExpr Base(Self, Self->getType(), VK_RValue, Loc);
ParmVarDecl *ArgDecl = *OMD->param_begin();
- DeclRefExpr Arg(ArgDecl, ArgDecl->getType(), VK_LValue, Loc);
+ QualType T = ArgDecl->getType();
+ if (T->isReferenceType())
+ T = cast<ReferenceType>(T)->getPointeeType();
+ DeclRefExpr Arg(ArgDecl, T, VK_LValue, Loc);
ObjCIvarRefExpr IvarRef(Ivar, Ivar->getType(), Loc, &Base, true, true);
// The property type can differ from the ivar type in some situations with
@@ -460,20 +528,104 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
FinishFunction();
}
+// FIXME: these are stolen from CGClass.cpp, which is lame.
+namespace {
+ struct CallArrayIvarDtor : EHScopeStack::Cleanup {
+ const ObjCIvarDecl *ivar;
+ llvm::Value *self;
+ CallArrayIvarDtor(const ObjCIvarDecl *ivar, llvm::Value *self)
+ : ivar(ivar), self(self) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ LValue lvalue =
+ CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), self, ivar, 0);
+
+ QualType type = ivar->getType();
+ const ConstantArrayType *arrayType
+ = CGF.getContext().getAsConstantArrayType(type);
+ QualType baseType = CGF.getContext().getBaseElementType(arrayType);
+ const CXXRecordDecl *classDecl = baseType->getAsCXXRecordDecl();
+
+ llvm::Value *base
+ = CGF.Builder.CreateBitCast(lvalue.getAddress(),
+ CGF.ConvertType(baseType)->getPointerTo());
+ CGF.EmitCXXAggrDestructorCall(classDecl->getDestructor(),
+ arrayType, base);
+ }
+ };
+
+ struct CallIvarDtor : EHScopeStack::Cleanup {
+ const ObjCIvarDecl *ivar;
+ llvm::Value *self;
+ CallIvarDtor(const ObjCIvarDecl *ivar, llvm::Value *self)
+ : ivar(ivar), self(self) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ LValue lvalue =
+ CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), self, ivar, 0);
+
+ QualType type = ivar->getType();
+ const CXXRecordDecl *classDecl = type->getAsCXXRecordDecl();
+
+ CGF.EmitCXXDestructorCall(classDecl->getDestructor(),
+ Dtor_Complete, /*ForVirtualBase=*/false,
+ lvalue.getAddress());
+ }
+ };
+}
+
+static void emitCXXDestructMethod(CodeGenFunction &CGF,
+ ObjCImplementationDecl *impl) {
+ CodeGenFunction::RunCleanupsScope scope(CGF);
+
+ llvm::Value *self = CGF.LoadObjCSelf();
+
+ ObjCInterfaceDecl *iface
+ = const_cast<ObjCInterfaceDecl*>(impl->getClassInterface());
+ for (ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
+ ivar; ivar = ivar->getNextIvar()) {
+ QualType type = ivar->getType();
+
+ // Drill down to the base element type.
+ QualType baseType = type;
+ const ConstantArrayType *arrayType =
+ CGF.getContext().getAsConstantArrayType(baseType);
+ if (arrayType) baseType = CGF.getContext().getBaseElementType(arrayType);
+
+ // Check whether the ivar is a destructible type.
+ QualType::DestructionKind destructKind = baseType.isDestructedType();
+ assert(destructKind == type.isDestructedType());
+
+ switch (destructKind) {
+ case QualType::DK_none:
+ continue;
+
+ case QualType::DK_cxx_destructor:
+ if (arrayType)
+ CGF.EHStack.pushCleanup<CallArrayIvarDtor>(NormalAndEHCleanup,
+ ivar, self);
+ else
+ CGF.EHStack.pushCleanup<CallIvarDtor>(NormalAndEHCleanup,
+ ivar, self);
+ break;
+ }
+ }
+
+ assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?");
+}
+
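
For reference, the destruction kinds the switch above distinguishes, shown on
hypothetical element types:

    struct Trivial { int x; };   // isDestructedType() == DK_none: skipped
    struct Owner { ~Owner(); };  // DK_cxx_destructor: CallIvarDtor pushed
    // An ivar declared as 'Owner items[4]' takes the CallArrayIvarDtor path.
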
void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
ObjCMethodDecl *MD,
bool ctor) {
- llvm::SmallVector<CXXCtorInitializer *, 8> IvarInitializers;
MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
StartObjCMethod(MD, IMP->getClassInterface());
- for (ObjCImplementationDecl::init_const_iterator B = IMP->init_begin(),
- E = IMP->init_end(); B != E; ++B) {
- CXXCtorInitializer *Member = (*B);
- IvarInitializers.push_back(Member);
- }
+
+ // Emit .cxx_construct.
if (ctor) {
- for (unsigned I = 0, E = IvarInitializers.size(); I != E; ++I) {
- CXXCtorInitializer *IvarInit = IvarInitializers[I];
+ llvm::SmallVector<CXXCtorInitializer *, 8> IvarInitializers;
+ for (ObjCImplementationDecl::init_const_iterator B = IMP->init_begin(),
+ E = IMP->init_end(); B != E; ++B) {
+ CXXCtorInitializer *IvarInit = (*B);
FieldDecl *Field = IvarInit->getAnyMember();
ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
@@ -486,37 +638,10 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
llvm::Value *SelfAsId =
Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
EmitReturnOfRValue(RValue::get(SelfAsId), IdTy);
+
+ // Emit .cxx_destruct.
} else {
- // dtor
- for (size_t i = IvarInitializers.size(); i > 0; --i) {
- FieldDecl *Field = IvarInitializers[i - 1]->getAnyMember();
- QualType FieldType = Field->getType();
- const ConstantArrayType *Array =
- getContext().getAsConstantArrayType(FieldType);
- if (Array)
- FieldType = getContext().getBaseElementType(FieldType);
-
- ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
- LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
- LoadObjCSelf(), Ivar, 0);
- const RecordType *RT = FieldType->getAs<RecordType>();
- CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
- CXXDestructorDecl *Dtor = FieldClassDecl->getDestructor();
- if (!Dtor->isTrivial()) {
- if (Array) {
- const llvm::Type *BasePtr = ConvertType(FieldType);
- BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr =
- Builder.CreateBitCast(LV.getAddress(), BasePtr);
- EmitCXXAggrDestructorCall(Dtor,
- Array, BaseAddrPtr);
- } else {
- EmitCXXDestructorCall(Dtor,
- Dtor_Complete, /*ForVirtualBase=*/false,
- LV.getAddress());
- }
- }
- }
+ emitCXXDestructMethod(*this, IMP);
}
FinishFunction();
}
@@ -585,16 +710,14 @@ static RValue GenerateMessageSendSuper(CodeGenFunction &CGF,
RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
ReturnValueSlot Return) {
const ObjCPropertyRefExpr *E = LV.getPropertyRefExpr();
- QualType ResultType;
+ QualType ResultType = E->getGetterResultType();
Selector S;
if (E->isExplicitProperty()) {
const ObjCPropertyDecl *Property = E->getExplicitProperty();
S = Property->getGetterName();
- ResultType = E->getType();
} else {
const ObjCMethodDecl *Getter = E->getImplicitPropertyGetter();
S = Getter->getSelector();
- ResultType = Getter->getResultType(); // with reference!
}
llvm::Value *Receiver = LV.getPropertyRefBaseAddr();
@@ -615,14 +738,8 @@ void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
LValue Dst) {
const ObjCPropertyRefExpr *E = Dst.getPropertyRefExpr();
Selector S = E->getSetterSelector();
- QualType ArgType;
- if (E->isImplicitProperty()) {
- const ObjCMethodDecl *Setter = E->getImplicitPropertySetter();
- ObjCMethodDecl::param_iterator P = Setter->param_begin();
- ArgType = (*P)->getType();
- } else {
- ArgType = E->getType();
- }
+ QualType ArgType = E->getSetterArgType();
+
// FIXME. Other than scalars, AST is not adequate for setter and
// getter type mismatches which require conversion.
if (Src.isScalar()) {
@@ -635,7 +752,7 @@ void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
}
CallArgList Args;
- Args.push_back(std::make_pair(Src, ArgType));
+ Args.add(Src, ArgType);
llvm::Value *Receiver = Dst.getPropertyRefBaseAddr();
QualType ResultType = getContext().VoidTy;
@@ -707,22 +824,19 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
CallArgList Args;
// The first argument is a temporary of the enumeration-state type.
- Args.push_back(std::make_pair(RValue::get(StatePtr),
- getContext().getPointerType(StateTy)));
+ Args.add(RValue::get(StatePtr), getContext().getPointerType(StateTy));
// The second argument is a temporary array with space for NumItems
// pointers. We'll actually be loading elements from the array
// pointer written into the control state; this buffer is so that
// collections that *aren't* backed by arrays can still queue up
// batches of elements.
- Args.push_back(std::make_pair(RValue::get(ItemsPtr),
- getContext().getPointerType(ItemsTy)));
+ Args.add(RValue::get(ItemsPtr), getContext().getPointerType(ItemsTy));
// The third argument is the capacity of that temporary array.
const llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems);
- Args.push_back(std::make_pair(RValue::get(Count),
- getContext().UnsignedLongTy));
+ Args.add(RValue::get(Count), getContext().UnsignedLongTy);
// Start the enumeration.
RValue CountRV =
@@ -764,11 +878,11 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
EmitBlock(LoopBodyBB);
// The current index into the buffer.
- llvm::PHINode *index = Builder.CreatePHI(UnsignedLongLTy, "forcoll.index");
+ llvm::PHINode *index = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.index");
index->addIncoming(zero, LoopInitBB);
// The current buffer size.
- llvm::PHINode *count = Builder.CreatePHI(UnsignedLongLTy, "forcoll.count");
+ llvm::PHINode *count = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.count");
count->addIncoming(initialBufferLimit, LoopInitBB);
// Check whether the mutations value has changed from where it was
@@ -779,7 +893,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
= Builder.CreateLoad(StateMutationsPtr, "statemutations");
llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated");
- llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcool.notmutated");
+ llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated");
Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations),
WasNotMutatedBB, WasMutatedBB);
@@ -791,8 +905,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
ConvertType(getContext().getObjCIdType()),
"tmp");
CallArgList Args2;
- Args2.push_back(std::make_pair(RValue::get(V),
- getContext().getObjCIdType()));
+ Args2.add(RValue::get(V), getContext().getObjCIdType());
// FIXME: We shouldn't need to get the function info here, the runtime already
// should have computed it to build the function.
EmitCall(CGM.getTypes().getFunctionInfo(getContext().VoidTy, Args2,
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index 5f19dc6..c4dc4c4 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This provides Objective-C code generation targetting the GNU runtime. The
+// This provides Objective-C code generation targeting the GNU runtime. The
// class in this file generates structures used by the GNU Objective-C runtime
// library. These structures are defined in objc/objc.h and objc/objc-api.h in
// the GNU runtime distribution.
@@ -24,90 +24,342 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
#include "llvm/Intrinsics.h"
#include "llvm/Module.h"
#include "llvm/LLVMContext.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Target/TargetData.h"
-#include <map>
+#include <stdarg.h>
using namespace clang;
using namespace CodeGen;
using llvm::dyn_cast;
-// The version of the runtime that this class targets. Must match the version
-// in the runtime.
-static const int RuntimeVersion = 8;
-static const int NonFragileRuntimeVersion = 9;
-static const int ProtocolVersion = 2;
-static const int NonFragileProtocolVersion = 3;
namespace {
-class CGObjCGNU : public CodeGen::CGObjCRuntime {
-private:
- CodeGen::CodeGenModule &CGM;
+/// Class that lazily initialises a runtime function. Avoids inserting the
+/// types and the function declaration into a module if they're not used, and
+/// avoids constructing the type more than once if it's used more than once.
+class LazyRuntimeFunction {
+ CodeGenModule *CGM;
+ std::vector<const llvm::Type*> ArgTys;
+ const char *FunctionName;
+ llvm::Function *Function;
+ public:
+ /// Constructor leaves this class uninitialized, because it is intended to
+ /// be used as a field in another class and not all of the types that are
+ /// used as arguments will necessarily be available at construction time.
+ LazyRuntimeFunction() : CGM(0), FunctionName(0), Function(0) {}
+
+ /// Initialises the lazy function with the name, return type, and the types
+ /// of the arguments.
+ END_WITH_NULL
+ void init(CodeGenModule *Mod, const char *name,
+ const llvm::Type *RetTy, ...) {
+ CGM = Mod;
+ FunctionName = name;
+ Function = 0;
+ ArgTys.clear();
+ va_list Args;
+ va_start(Args, RetTy);
+ while (const llvm::Type *ArgTy = va_arg(Args, const llvm::Type*))
+ ArgTys.push_back(ArgTy);
+ va_end(Args);
+ // Push the return type on at the end so we can pop it off easily
+ ArgTys.push_back(RetTy);
+ }
+ /// Overloaded cast operator, allows the class to be implicitly cast to an
+ /// LLVM constant.
+ operator llvm::Function*() {
+ if (!Function) {
+ if (0 == FunctionName) return 0;
+ // We put the return type on the end of the vector, so pop it back off
+ const llvm::Type *RetTy = ArgTys.back();
+ ArgTys.pop_back();
+ llvm::FunctionType *FTy = llvm::FunctionType::get(RetTy, ArgTys, false);
+ Function =
+ cast<llvm::Function>(CGM->CreateRuntimeFunction(FTy, FunctionName));
+ // We won't need to use the types again, so we may as well clean up the
+ // vector now
+ ArgTys.resize(0);
+ }
+ return Function;
+ }
+};
+
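
A hedged usage sketch of LazyRuntimeFunction (the argument types are
stand-ins; the trailing NULL is the terminator that END_WITH_NULL enforces):

    LazyRuntimeFunction MsgLookupFn;
    void bind(CodeGenModule &CGM, const llvm::Type *IMPTy,
              const llvm::Type *IdTy, const llvm::Type *SelectorTy) {
      // Records the name and types only; nothing enters the module yet.
      MsgLookupFn.init(&CGM, "objc_msg_lookup", IMPTy, IdTy, SelectorTy, NULL);
    }
    llvm::Function *lookupFn() {
      return MsgLookupFn;  // first use creates the declaration in the module
    }
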
+
+/// GNU Objective-C runtime code generation. This class implements the parts of
+/// Objective-C support that are specific to the GNU family of runtimes (GCC and
+/// GNUstep).
+class CGObjCGNU : public CGObjCRuntime {
+protected:
+ /// The module that is using this class
+ CodeGenModule &CGM;
+ /// The LLVM module into which output is inserted
llvm::Module &TheModule;
+ /// struct objc_super. Used for sending messages to super. This structure
+ /// contains the receiver (object) and the expected class.
+ const llvm::StructType *ObjCSuperTy;
+ /// struct objc_super*. The type of the argument to the superclass message
+ /// lookup functions.
+ const llvm::PointerType *PtrToObjCSuperTy;
+ /// LLVM type for selectors. Opaque pointer (i8*) unless a header declaring
+ /// SEL is included in a header somewhere, in which case it will be whatever
+ /// type is declared in that header, most likely {i8*, i8*}.
const llvm::PointerType *SelectorTy;
+ /// LLVM i8 type. Cached here to avoid repeatedly getting it in all of the
+  /// places where it's used.
const llvm::IntegerType *Int8Ty;
+ /// Pointer to i8 - LLVM type of char*, for all of the places where the
+ /// runtime needs to deal with C strings.
const llvm::PointerType *PtrToInt8Ty;
- const llvm::FunctionType *IMPTy;
+ /// Instance Method Pointer type. This is a pointer to a function that takes,
+ /// at a minimum, an object and a selector, and is the generic type for
+ /// Objective-C methods. Due to differences between variadic / non-variadic
+ /// calling conventions, it must always be cast to the correct type before
+ /// actually being used.
+ const llvm::PointerType *IMPTy;
+ /// Type of an untyped Objective-C object. Clang treats id as a built-in type
+ /// when compiling Objective-C code, so this may be an opaque pointer (i8*),
+ /// but if the runtime header declaring it is included then it may be a
+ /// pointer to a structure.
const llvm::PointerType *IdTy;
+ /// Pointer to a pointer to an Objective-C object. Used in the new ABI
+ /// message lookup function and some GC-related functions.
const llvm::PointerType *PtrToIdTy;
+ /// The clang type of id. Used when using the clang CGCall infrastructure to
+ /// call Objective-C methods.
CanQualType ASTIdTy;
+ /// LLVM type for C int type.
const llvm::IntegerType *IntTy;
+ /// LLVM type for an opaque pointer. This is identical to PtrToInt8Ty, but is
+ /// used in the code to document the difference between i8* meaning a pointer
+ /// to a C string and i8* meaning a pointer to some opaque type.
const llvm::PointerType *PtrTy;
+ /// LLVM type for C long type. The runtime uses this in a lot of places where
+ /// it should be using intptr_t, but we can't fix this without breaking
+ /// compatibility with GCC...
const llvm::IntegerType *LongTy;
+ /// LLVM type for C size_t. Used in various runtime data structures.
const llvm::IntegerType *SizeTy;
+ /// LLVM type for C ptrdiff_t. Mainly used in property accessor functions.
const llvm::IntegerType *PtrDiffTy;
+ /// LLVM type for C int*. Used for GCC-ABI-compatible non-fragile instance
+ /// variables.
const llvm::PointerType *PtrToIntTy;
+ /// LLVM type for Objective-C BOOL type.
const llvm::Type *BoolTy;
+ /// Metadata kind used to tie method lookups to message sends. The GNUstep
+ /// runtime provides some LLVM passes that can use this to do things like
+ /// automatic IMP caching and speculative inlining.
+ unsigned msgSendMDKind;
+ /// Helper function that generates a constant string and returns a pointer to
+ /// the start of the string. The result of this function can be used anywhere
+ /// where the C code specifies const char*.
+ llvm::Constant *MakeConstantString(const std::string &Str,
+ const std::string &Name="") {
+ llvm::Constant *ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
+ return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros, 2);
+ }
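+  // For example, MakeConstantString("foo") yields roughly this IR:
+  //   @.str = private constant [4 x i8] c"foo\00"
+  //   i8* getelementptr ([4 x i8]* @.str, i32 0, i32 0)
+  // i.e. an i8* aimed at the first character of a NUL-terminated string.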
+ /// Emits a linkonce_odr string, whose name is the prefix followed by the
+ /// string value. This allows the linker to combine the strings between
+ /// different modules. Used for EH typeinfo names, selector strings, and a
+ /// few other things.
+ llvm::Constant *ExportUniqueString(const std::string &Str,
+ const std::string prefix) {
+ std::string name = prefix + Str;
+ llvm::Constant *ConstStr = TheModule.getGlobalVariable(name);
+ if (!ConstStr) {
+ llvm::Constant *value = llvm::ConstantArray::get(VMContext, Str, true);
+ ConstStr = new llvm::GlobalVariable(TheModule, value->getType(), true,
+ llvm::GlobalValue::LinkOnceODRLinkage, value, prefix + Str);
+ }
+ return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros, 2);
+ }
+ /// Generates a global structure, initialized by the elements in the vector.
+ /// The element types must match the types of the structure elements in the
+ /// first argument.
+ llvm::GlobalVariable *MakeGlobal(const llvm::StructType *Ty,
+ std::vector<llvm::Constant*> &V,
+ llvm::StringRef Name="",
+ llvm::GlobalValue::LinkageTypes linkage
+ =llvm::GlobalValue::InternalLinkage) {
+ llvm::Constant *C = llvm::ConstantStruct::get(Ty, V);
+ return new llvm::GlobalVariable(TheModule, Ty, false,
+ linkage, C, Name);
+ }
+  /// Generates a global array. The vector must contain the same number of
+  /// elements as the array type declares, each of the type specified as the
+  /// array element type.
+ llvm::GlobalVariable *MakeGlobal(const llvm::ArrayType *Ty,
+ std::vector<llvm::Constant*> &V,
+ llvm::StringRef Name="",
+ llvm::GlobalValue::LinkageTypes linkage
+ =llvm::GlobalValue::InternalLinkage) {
+ llvm::Constant *C = llvm::ConstantArray::get(Ty, V);
+ return new llvm::GlobalVariable(TheModule, Ty, false,
+ linkage, C, Name);
+ }
+ /// Generates a global array, inferring the array type from the specified
+ /// element type and the size of the initialiser.
+ llvm::GlobalVariable *MakeGlobalArray(const llvm::Type *Ty,
+ std::vector<llvm::Constant*> &V,
+ llvm::StringRef Name="",
+ llvm::GlobalValue::LinkageTypes linkage
+ =llvm::GlobalValue::InternalLinkage) {
+ llvm::ArrayType *ArrayTy = llvm::ArrayType::get(Ty, V.size());
+ return MakeGlobal(ArrayTy, V, Name, linkage);
+ }
+ /// Ensures that the value has the required type, by inserting a bitcast if
+ /// required. This function lets us avoid inserting bitcasts that are
+ /// redundant.
+ llvm::Value* EnforceType(CGBuilderTy B, llvm::Value *V, const llvm::Type *Ty){
+ if (V->getType() == Ty) return V;
+ return B.CreateBitCast(V, Ty);
+ }
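+  // For example, EnforceType(Builder, Receiver, IdTy) returns Receiver
+  // untouched when it is already an id, and otherwise emits one bitcast.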
+ // Some zeros used for GEPs in lots of places.
+ llvm::Constant *Zeros[2];
+ /// Null pointer value. Mainly used as a terminator in various arrays.
+ llvm::Constant *NULLPtr;
+ /// LLVM context.
+ llvm::LLVMContext &VMContext;
+private:
+ /// Placeholder for the class. Lots of things refer to the class before we've
+ /// actually emitted it. We use this alias as a placeholder, and then replace
+ /// it with a pointer to the class structure before finally emitting the
+ /// module.
llvm::GlobalAlias *ClassPtrAlias;
+  /// Placeholder for the metaclass. Lots of things refer to the metaclass
+  /// before we've actually emitted it. We use this alias as a placeholder,
+  /// and then replace it with a pointer to the metaclass structure before
+  /// finally emitting the module.
llvm::GlobalAlias *MetaClassPtrAlias;
+  /// All of the classes that have been generated for this compilation unit.
std::vector<llvm::Constant*> Classes;
+  /// All of the categories that have been generated for this compilation unit.
std::vector<llvm::Constant*> Categories;
+ /// All of the Objective-C constant strings that have been generated for this
+  /// compilation unit.
std::vector<llvm::Constant*> ConstantStrings;
+ /// Map from string values to Objective-C constant strings in the output.
+ /// Used to prevent emitting Objective-C strings more than once. This should
+ /// not be required at all - CodeGenModule should manage this list.
llvm::StringMap<llvm::Constant*> ObjCStrings;
- llvm::Function *LoadFunction;
+ /// All of the protocols that have been declared.
llvm::StringMap<llvm::Constant*> ExistingProtocols;
- typedef std::pair<std::string, std::string> TypedSelector;
- std::map<TypedSelector, llvm::GlobalAlias*> TypedSelectors;
- llvm::StringMap<llvm::GlobalAlias*> UntypedSelectors;
- // Selectors that we don't emit in GC mode
+ /// For each variant of a selector, we store the type encoding and a
+ /// placeholder value. For an untyped selector, the type will be the empty
+ /// string. Selector references are all done via the module's selector table,
+ /// so we create an alias as a placeholder and then replace it with the real
+ /// value later.
+ typedef std::pair<std::string, llvm::GlobalAlias*> TypedSelector;
+ /// Type of the selector map. This is roughly equivalent to the structure
+ /// used in the GNUstep runtime, which maintains a list of all of the valid
+ /// types for a selector in a table.
+ typedef llvm::DenseMap<Selector, llvm::SmallVector<TypedSelector, 2> >
+ SelectorMap;
+ /// A map from selectors to selector types. This allows us to emit all
+ /// selectors of the same name and type together.
+ SelectorMap SelectorTable;
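+  // For example, after @selector(count) has been used both untyped and with
+  // a (hypothetical) type encoding, the table holds something like:
+  //   count -> [ ("", alias0), ("I8@0:4", alias1) ]
+  // and each alias is later swapped for a pointer into the selector list.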
+
+ /// Selectors related to memory management. When compiling in GC mode, we
+ /// omit these.
Selector RetainSel, ReleaseSel, AutoreleaseSel;
- // Functions used for GC.
- llvm::Constant *IvarAssignFn, *StrongCastAssignFn, *MemMoveFn, *WeakReadFn,
- *WeakAssignFn, *GlobalAssignFn;
- // Some zeros used for GEPs in lots of places.
- llvm::Constant *Zeros[2];
- llvm::Constant *NULLPtr;
- llvm::LLVMContext &VMContext;
- /// Metadata kind used to tie method lookups to message sends.
- unsigned msgSendMDKind;
+ /// Runtime functions used for memory management in GC mode. Note that clang
+ /// supports code generation for calling these functions, but neither GNU
+ /// runtime actually supports this API properly yet.
+ LazyRuntimeFunction IvarAssignFn, StrongCastAssignFn, MemMoveFn, WeakReadFn,
+ WeakAssignFn, GlobalAssignFn;
+
+protected:
+ /// Function used for throwing Objective-C exceptions.
+ LazyRuntimeFunction ExceptionThrowFn;
+ /// Function used for rethrowing exceptions, used at the end of @finally or
+  /// @synchronized blocks.
+ LazyRuntimeFunction ExceptionReThrowFn;
+ /// Function called when entering a catch function. This is required for
+ /// differentiating Objective-C exceptions and foreign exceptions.
+ LazyRuntimeFunction EnterCatchFn;
+ /// Function called when exiting from a catch block. Used to do exception
+ /// cleanup.
+ LazyRuntimeFunction ExitCatchFn;
+  /// Function called when entering a @synchronized block. Acquires the lock.
+ LazyRuntimeFunction SyncEnterFn;
+  /// Function called when exiting a @synchronized block. Releases the lock.
+ LazyRuntimeFunction SyncExitFn;
+
+private:
+
+ /// Function called if fast enumeration detects that the collection is
+  /// modified during enumeration.
+ LazyRuntimeFunction EnumerationMutationFn;
+ /// Function for implementing synthesized property getters that return an
+ /// object.
+ LazyRuntimeFunction GetPropertyFn;
+ /// Function for implementing synthesized property setters that return an
+ /// object.
+ LazyRuntimeFunction SetPropertyFn;
+ /// Function used for non-object declared property getters.
+ LazyRuntimeFunction GetStructPropertyFn;
+ /// Function used for non-object declared property setters.
+ LazyRuntimeFunction SetStructPropertyFn;
+
+ /// The version of the runtime that this class targets. Must match the
+ /// version in the runtime.
+ const int RuntimeVersion;
+ /// The version of the protocol class. Used to differentiate between ObjC1
+  /// and ObjC2 protocols. Objective-C 1 protocols cannot contain optional
+  /// components and cannot contain declared properties. We always emit
+ /// Objective-C 2 property structures, but we have to pretend that they're
+ /// Objective-C 1 property structures when targeting the GCC runtime or it
+ /// will abort.
+ const int ProtocolVersion;
private:
+ /// Generates an instance variable list structure. This is a structure
+ /// containing a size and an array of structures containing instance variable
+ /// metadata. This is used purely for introspection in the fragile ABI. In
+ /// the non-fragile ABI, it's used for instance variable fixup.
llvm::Constant *GenerateIvarList(
const llvm::SmallVectorImpl<llvm::Constant *> &IvarNames,
const llvm::SmallVectorImpl<llvm::Constant *> &IvarTypes,
const llvm::SmallVectorImpl<llvm::Constant *> &IvarOffsets);
- llvm::Constant *GenerateMethodList(const std::string &ClassName,
- const std::string &CategoryName,
+ /// Generates a method list structure. This is a structure containing a size
+ /// and an array of structures containing method metadata.
+ ///
+ /// This structure is used by both classes and categories, and contains a next
+ /// pointer allowing them to be chained together in a linked list.
+ llvm::Constant *GenerateMethodList(const llvm::StringRef &ClassName,
+ const llvm::StringRef &CategoryName,
const llvm::SmallVectorImpl<Selector> &MethodSels,
const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
bool isClassMethodList);
+ /// Emits an empty protocol. This is used for @protocol() where no protocol
+ /// is found. The runtime will (hopefully) fix up the pointer to refer to the
+ /// real protocol.
llvm::Constant *GenerateEmptyProtocol(const std::string &ProtocolName);
+ /// Generates a list of property metadata structures. This follows the same
+ /// pattern as method and instance variable metadata lists.
llvm::Constant *GeneratePropertyList(const ObjCImplementationDecl *OID,
llvm::SmallVectorImpl<Selector> &InstanceMethodSels,
llvm::SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes);
+ /// Generates a list of referenced protocols. Classes, categories, and
+ /// protocols all use this structure.
llvm::Constant *GenerateProtocolList(
const llvm::SmallVectorImpl<std::string> &Protocols);
- // To ensure that all protocols are seen by the runtime, we add a category on
- // a class defined in the runtime, declaring no methods, but adopting the
- // protocols.
+ /// To ensure that all protocols are seen by the runtime, we add a category on
+ /// a class defined in the runtime, declaring no methods, but adopting the
+ /// protocols. This is a horribly ugly hack, but it allows us to collect all
+ /// of the protocols without changing the ABI.
void GenerateProtocolHolderCategory(void);
+ /// Generates a class structure.
llvm::Constant *GenerateClassStructure(
llvm::Constant *MetaClass,
llvm::Constant *SuperClass,
@@ -121,31 +373,43 @@ private:
llvm::Constant *IvarOffsets,
llvm::Constant *Properties,
bool isMeta=false);
+ /// Generates a method list. This is used by protocols to define the required
+ /// and optional methods.
llvm::Constant *GenerateProtocolMethodList(
const llvm::SmallVectorImpl<llvm::Constant *> &MethodNames,
const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes);
- llvm::Constant *MakeConstantString(const std::string &Str, const std::string
- &Name="");
- llvm::Constant *ExportUniqueString(const std::string &Str, const std::string
- prefix);
- llvm::Constant *MakeGlobal(const llvm::StructType *Ty,
- std::vector<llvm::Constant*> &V, llvm::StringRef Name="",
- llvm::GlobalValue::LinkageTypes linkage=llvm::GlobalValue::InternalLinkage);
- llvm::Constant *MakeGlobal(const llvm::ArrayType *Ty,
- std::vector<llvm::Constant*> &V, llvm::StringRef Name="",
- llvm::GlobalValue::LinkageTypes linkage=llvm::GlobalValue::InternalLinkage);
+ /// Returns a selector with the specified type encoding. An empty string is
+ /// used to return an untyped selector (with the types field set to NULL).
+ llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel,
+ const std::string &TypeEncoding, bool lval);
+ /// Returns the variable used to store the offset of an instance variable.
llvm::GlobalVariable *ObjCIvarOffsetVariable(const ObjCInterfaceDecl *ID,
const ObjCIvarDecl *Ivar);
+ /// Emits a reference to a class. This allows the linker to object if there
+ /// is no class of the matching name.
void EmitClassRef(const std::string &className);
- llvm::Value* EnforceType(CGBuilderTy B, llvm::Value *V, const llvm::Type *Ty){
- if (V->getType() == Ty) return V;
- return B.CreateBitCast(V, Ty);
- }
+protected:
+ /// Looks up the method for sending a message to the specified object. This
+ /// mechanism differs between the GCC and GNU runtimes, so this method must be
+ /// overridden in subclasses.
+ virtual llvm::Value *LookupIMP(CodeGenFunction &CGF,
+ llvm::Value *&Receiver,
+ llvm::Value *cmd,
+ llvm::MDNode *node) = 0;
+ /// Looks up the method for sending a message to a superclass. This mechanism
+ /// differs between the GCC and GNU runtimes, so this method must be
+ /// overridden in subclasses.
+ virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
+ llvm::Value *ObjCSuper,
+ llvm::Value *cmd) = 0;
public:
- CGObjCGNU(CodeGen::CodeGenModule &cgm);
+ CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
+ unsigned protocolClassVersion);
+
virtual llvm::Constant *GenerateConstantString(const StringLiteral *);
- virtual CodeGen::RValue
- GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+
+ virtual RValue
+ GenerateMessageSend(CodeGenFunction &CGF,
ReturnValueSlot Return,
QualType ResultType,
Selector Sel,
@@ -153,8 +417,8 @@ public:
const CallArgList &CallArgs,
const ObjCInterfaceDecl *Class,
const ObjCMethodDecl *Method);
- virtual CodeGen::RValue
- GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ virtual RValue
+ GenerateMessageSendSuper(CodeGenFunction &CGF,
ReturnValueSlot Return,
QualType ResultType,
Selector Sel,
@@ -186,41 +450,174 @@ public:
virtual llvm::Function *GetGetStructFunction();
virtual llvm::Constant *EnumerationMutationFunction();
- virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ virtual void EmitTryStmt(CodeGenFunction &CGF,
const ObjCAtTryStmt &S);
- virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ virtual void EmitSynchronizedStmt(CodeGenFunction &CGF,
const ObjCAtSynchronizedStmt &S);
- virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ virtual void EmitThrowStmt(CodeGenFunction &CGF,
const ObjCAtThrowStmt &S);
- virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ virtual llvm::Value * EmitObjCWeakRead(CodeGenFunction &CGF,
llvm::Value *AddrWeakObj);
- virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ virtual void EmitObjCWeakAssign(CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst);
- virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ virtual void EmitObjCGlobalAssign(CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dest,
bool threadlocal=false);
- virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ virtual void EmitObjCIvarAssign(CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dest,
llvm::Value *ivarOffset);
- virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ virtual void EmitObjCStrongCastAssign(CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dest);
- virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ virtual void EmitGCMemmoveCollectable(CodeGenFunction &CGF,
llvm::Value *DestPtr,
llvm::Value *SrcPtr,
llvm::Value *Size);
- virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ virtual LValue EmitObjCValueForIvar(CodeGenFunction &CGF,
QualType ObjectTy,
llvm::Value *BaseValue,
const ObjCIvarDecl *Ivar,
unsigned CVRQualifiers);
- virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ virtual llvm::Value *EmitIvarOffset(CodeGenFunction &CGF,
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
- virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
+ virtual llvm::Constant *BuildGCBlockLayout(CodeGenModule &CGM,
const CGBlockInfo &blockInfo) {
return NULLPtr;
}
};
+/// Class representing the legacy GCC Objective-C ABI. This is the default when
+/// -fobjc-nonfragile-abi is not specified.
+///
+/// The GCC ABI target actually generates code that is approximately compatible
+/// with the new GNUstep runtime ABI, but refrains from using any features that
+/// would not work with the GCC runtime. For example, clang always generates
+/// the extended form of the class structure, and the extra fields are simply
+/// ignored by GCC libobjc.
+class CGObjCGCC : public CGObjCGNU {
+ /// The GCC ABI message lookup function. Returns an IMP pointing to the
+ /// method implementation for this message.
+ LazyRuntimeFunction MsgLookupFn;
+ /// The GCC ABI superclass message lookup function. Takes a pointer to a
+ /// structure describing the receiver and the class, and a selector as
+ /// arguments. Returns the IMP for the corresponding method.
+ LazyRuntimeFunction MsgLookupSuperFn;
+protected:
+ virtual llvm::Value *LookupIMP(CodeGenFunction &CGF,
+ llvm::Value *&Receiver,
+ llvm::Value *cmd,
+ llvm::MDNode *node) {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *imp = Builder.CreateCall2(MsgLookupFn,
+ EnforceType(Builder, Receiver, IdTy),
+ EnforceType(Builder, cmd, SelectorTy));
+ cast<llvm::CallInst>(imp)->setMetadata(msgSendMDKind, node);
+ return imp;
+ }
+ virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
+ llvm::Value *ObjCSuper,
+ llvm::Value *cmd) {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper,
+ PtrToObjCSuperTy), cmd};
+ return Builder.CreateCall(MsgLookupSuperFn, lookupArgs, lookupArgs+2);
+ }
+ public:
+ CGObjCGCC(CodeGenModule &Mod) : CGObjCGNU(Mod, 8, 2) {
+ // IMP objc_msg_lookup(id, SEL);
+ MsgLookupFn.init(&CGM, "objc_msg_lookup", IMPTy, IdTy, SelectorTy, NULL);
+ // IMP objc_msg_lookup_super(struct objc_super*, SEL);
+ MsgLookupSuperFn.init(&CGM, "objc_msg_lookup_super", IMPTy,
+ PtrToObjCSuperTy, SelectorTy, NULL);
+ }
+};
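+// Under this ABI, a message send such as [receiver foo] therefore lowers to
+// an explicit two-step sequence, approximately:
+//   %imp = call i8* (i8*, i8*)* @objc_msg_lookup(i8* %receiver, i8* %cmd)
+//   %ret = call i8* %imp(i8* %receiver, i8* %cmd)
+// with %imp first bitcast to the method's real signature where needed.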
+/// Class used when targeting the new GNUstep runtime ABI.
+class CGObjCGNUstep : public CGObjCGNU {
+ /// The slot lookup function. Returns a pointer to a cacheable structure
+ /// that contains (among other things) the IMP.
+ LazyRuntimeFunction SlotLookupFn;
+ /// The GNUstep ABI superclass message lookup function. Takes a pointer to
+ /// a structure describing the receiver and the class, and a selector as
+ /// arguments. Returns the slot for the corresponding method. Superclass
+ /// message lookup rarely changes, so this is a good caching opportunity.
+ LazyRuntimeFunction SlotLookupSuperFn;
+  /// Type of a slot structure pointer. This is returned by the various
+ /// lookup functions.
+ llvm::Type *SlotTy;
+ protected:
+ virtual llvm::Value *LookupIMP(CodeGenFunction &CGF,
+ llvm::Value *&Receiver,
+ llvm::Value *cmd,
+ llvm::MDNode *node) {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Function *LookupFn = SlotLookupFn;
+
+ // Store the receiver on the stack so that we can reload it later
+ llvm::Value *ReceiverPtr = CGF.CreateTempAlloca(Receiver->getType());
+ Builder.CreateStore(Receiver, ReceiverPtr);
+
+ llvm::Value *self;
+
+ if (isa<ObjCMethodDecl>(CGF.CurCodeDecl)) {
+ self = CGF.LoadObjCSelf();
+ } else {
+ self = llvm::ConstantPointerNull::get(IdTy);
+ }
+
+ // The lookup function is guaranteed not to capture the receiver pointer.
+ LookupFn->setDoesNotCapture(1);
+
+ llvm::CallInst *slot =
+ Builder.CreateCall3(LookupFn,
+ EnforceType(Builder, ReceiverPtr, PtrToIdTy),
+ EnforceType(Builder, cmd, SelectorTy),
+ EnforceType(Builder, self, IdTy));
+ slot->setOnlyReadsMemory();
+ slot->setMetadata(msgSendMDKind, node);
+
+ // Load the imp from the slot
+ llvm::Value *imp = Builder.CreateLoad(Builder.CreateStructGEP(slot, 4));
+
+ // The lookup function may have changed the receiver, so make sure we use
+ // the new one.
+ Receiver = Builder.CreateLoad(ReceiverPtr, true);
+ return imp;
+ }
+ virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
+ llvm::Value *ObjCSuper,
+ llvm::Value *cmd) {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *lookupArgs[] = {ObjCSuper, cmd};
+
+ llvm::CallInst *slot = Builder.CreateCall(SlotLookupSuperFn, lookupArgs,
+ lookupArgs+2);
+ slot->setOnlyReadsMemory();
+
+ return Builder.CreateLoad(Builder.CreateStructGEP(slot, 4));
+ }
+ public:
+ CGObjCGNUstep(CodeGenModule &Mod) : CGObjCGNU(Mod, 9, 3) {
+ llvm::StructType *SlotStructTy = llvm::StructType::get(VMContext, PtrTy,
+ PtrTy, PtrTy, IntTy, IMPTy, NULL);
+ SlotTy = llvm::PointerType::getUnqual(SlotStructTy);
+ // Slot_t objc_msg_lookup_sender(id *receiver, SEL selector, id sender);
+ SlotLookupFn.init(&CGM, "objc_msg_lookup_sender", SlotTy, PtrToIdTy,
+ SelectorTy, IdTy, NULL);
+    // Slot_t objc_slot_lookup_super(struct objc_super*, SEL);
+ SlotLookupSuperFn.init(&CGM, "objc_slot_lookup_super", SlotTy,
+ PtrToObjCSuperTy, SelectorTy, NULL);
+    // If we're in ObjC++ mode, then we want to be able to catch both C++ and
+    // Objective-C exceptions, so use the C++ exception handling runtime's
+    // functions for entering / exiting catch blocks and for rethrowing.
+ if (CGM.getLangOptions().CPlusPlus) {
+ const llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
+ // void *__cxa_begin_catch(void *e)
+ EnterCatchFn.init(&CGM, "__cxa_begin_catch", PtrTy, PtrTy, NULL);
+ // void __cxa_end_catch(void)
+ EnterCatchFn.init(&CGM, "__cxa_end_catch", VoidTy, NULL);
+ // void _Unwind_Resume_or_Rethrow(void*)
+ ExceptionReThrowFn.init(&CGM, "_Unwind_Resume_or_Rethrow", VoidTy, PtrTy, NULL);
+ }
+ }
+};
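+// Under this ABI the same send goes through a cacheable slot, approximately:
+//   %slot = call %slot_t* @objc_msg_lookup_sender(i8** %r, i8* %cmd, i8* %self)
+//   %imp  = load <field 4 of %slot>
+//   %ret  = call %imp(...)
+// which is what enables the IMP-caching and speculative-inlining passes
+// mentioned above.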
+
} // end anonymous namespace
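+// A sketch of the factory expected to choose between the two subclasses,
+// assuming it keys off the same ObjCNonFragileABI flag used throughout this
+// patch (the exact definition is outside the hunks shown here):
+//
+//   CGObjCRuntime *CreateGNUObjCRuntime(CodeGenModule &CGM) {
+//     if (CGM.getLangOptions().ObjCNonFragileABI)
+//       return new CGObjCGNUstep(CGM);
+//     return new CGObjCGCC(CGM);
+//   }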
@@ -242,45 +639,34 @@ void CGObjCGNU::EmitClassRef(const std::string &className) {
llvm::GlobalValue::WeakAnyLinkage, ClassSymbol, symbolRef);
}
-static std::string SymbolNameForMethod(const std::string &ClassName, const
- std::string &CategoryName, const std::string &MethodName, bool isClassMethod)
-{
- std::string MethodNameColonStripped = MethodName;
+static std::string SymbolNameForMethod(const llvm::StringRef &ClassName,
+ const llvm::StringRef &CategoryName, const Selector MethodName,
+ bool isClassMethod) {
+ std::string MethodNameColonStripped = MethodName.getAsString();
std::replace(MethodNameColonStripped.begin(), MethodNameColonStripped.end(),
':', '_');
- return std::string(isClassMethod ? "_c_" : "_i_") + ClassName + "_" +
- CategoryName + "_" + MethodNameColonStripped;
-}
-static std::string MangleSelectorTypes(const std::string &TypeString) {
- std::string Mangled = TypeString;
- // Simple mangling to avoid breaking when we mix JIT / static code.
- // Not part of the ABI, subject to change without notice.
- std::replace(Mangled.begin(), Mangled.end(), '@', '_');
- std::replace(Mangled.begin(), Mangled.end(), ':', 'J');
- std::replace(Mangled.begin(), Mangled.end(), '*', 'e');
- std::replace(Mangled.begin(), Mangled.end(), '#', 'E');
- std::replace(Mangled.begin(), Mangled.end(), ':', 'j');
- std::replace(Mangled.begin(), Mangled.end(), '(', 'g');
- std::replace(Mangled.begin(), Mangled.end(), ')', 'G');
- std::replace(Mangled.begin(), Mangled.end(), '[', 'h');
- std::replace(Mangled.begin(), Mangled.end(), ']', 'H');
- return Mangled;
+ return (llvm::Twine(isClassMethod ? "_c_" : "_i_") + ClassName + "_" +
+ CategoryName + "_" + MethodNameColonStripped).str();
}
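+// For example, the instance method -[Foo bar:baz:] (with no category) maps to
+// "_i_Foo__bar_baz_", and the class method +[Foo load] maps to "_c_Foo__load".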
-CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
- : CGM(cgm), TheModule(CGM.getModule()), ClassPtrAlias(0),
- MetaClassPtrAlias(0), VMContext(cgm.getLLVMContext()) {
+CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
+ unsigned protocolClassVersion)
+ : CGM(cgm), TheModule(CGM.getModule()), VMContext(cgm.getLLVMContext()),
+ ClassPtrAlias(0), MetaClassPtrAlias(0), RuntimeVersion(runtimeABIVersion),
+ ProtocolVersion(protocolClassVersion) {
+
msgSendMDKind = VMContext.getMDKindID("GNUObjCMessageSend");
+ CodeGenTypes &Types = CGM.getTypes();
IntTy = cast<llvm::IntegerType>(
- CGM.getTypes().ConvertType(CGM.getContext().IntTy));
+ Types.ConvertType(CGM.getContext().IntTy));
LongTy = cast<llvm::IntegerType>(
- CGM.getTypes().ConvertType(CGM.getContext().LongTy));
+ Types.ConvertType(CGM.getContext().LongTy));
SizeTy = cast<llvm::IntegerType>(
- CGM.getTypes().ConvertType(CGM.getContext().getSizeType()));
+ Types.ConvertType(CGM.getContext().getSizeType()));
PtrDiffTy = cast<llvm::IntegerType>(
- CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType()));
+ Types.ConvertType(CGM.getContext().getPointerDiffType()));
BoolTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
Int8Ty = llvm::Type::getInt8Ty(VMContext);
@@ -302,20 +688,54 @@ CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
PtrTy = PtrToInt8Ty;
// Object type
- ASTIdTy = CGM.getContext().getCanonicalType(CGM.getContext().getObjCIdType());
- if (QualType() == ASTIdTy) {
- IdTy = PtrToInt8Ty;
- } else {
+ QualType UnqualIdTy = CGM.getContext().getObjCIdType();
+ ASTIdTy = CanQualType();
+ if (UnqualIdTy != QualType()) {
+ ASTIdTy = CGM.getContext().getCanonicalType(UnqualIdTy);
IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
+ } else {
+ IdTy = PtrToInt8Ty;
}
PtrToIdTy = llvm::PointerType::getUnqual(IdTy);
+ ObjCSuperTy = llvm::StructType::get(VMContext, IdTy, IdTy, NULL);
+ PtrToObjCSuperTy = llvm::PointerType::getUnqual(ObjCSuperTy);
+
+ const llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
+
+ // void objc_exception_throw(id);
+ ExceptionThrowFn.init(&CGM, "objc_exception_throw", VoidTy, IdTy, NULL);
+ ExceptionReThrowFn.init(&CGM, "objc_exception_throw", VoidTy, IdTy, NULL);
+ // int objc_sync_enter(id);
+ SyncEnterFn.init(&CGM, "objc_sync_enter", IntTy, IdTy, NULL);
+ // int objc_sync_exit(id);
+ SyncExitFn.init(&CGM, "objc_sync_exit", IntTy, IdTy, NULL);
+
+ // void objc_enumerationMutation (id)
+ EnumerationMutationFn.init(&CGM, "objc_enumerationMutation", VoidTy,
+ IdTy, NULL);
+
+ // id objc_getProperty(id, SEL, ptrdiff_t, BOOL)
+ GetPropertyFn.init(&CGM, "objc_getProperty", IdTy, IdTy, SelectorTy,
+ PtrDiffTy, BoolTy, NULL);
+ // void objc_setProperty(id, SEL, ptrdiff_t, id, BOOL, BOOL)
+ SetPropertyFn.init(&CGM, "objc_setProperty", VoidTy, IdTy, SelectorTy,
+ PtrDiffTy, IdTy, BoolTy, BoolTy, NULL);
+  // void objc_getPropertyStruct(void*, void*, ptrdiff_t, BOOL, BOOL)
+ GetStructPropertyFn.init(&CGM, "objc_getPropertyStruct", VoidTy, PtrTy, PtrTy,
+ PtrDiffTy, BoolTy, BoolTy, NULL);
+ // void objc_setPropertyStruct(void*, void*, ptrdiff_t, BOOL, BOOL)
+ SetStructPropertyFn.init(&CGM, "objc_setPropertyStruct", VoidTy, PtrTy, PtrTy,
+ PtrDiffTy, BoolTy, BoolTy, NULL);
+
// IMP type
std::vector<const llvm::Type*> IMPArgs;
IMPArgs.push_back(IdTy);
IMPArgs.push_back(SelectorTy);
- IMPTy = llvm::FunctionType::get(IdTy, IMPArgs, true);
+ IMPTy = llvm::PointerType::getUnqual(llvm::FunctionType::get(IdTy, IMPArgs,
+ true));
+ // Don't bother initialising the GC stuff unless we're compiling in GC mode
if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
// Get selectors needed in GC mode
RetainSel = GetNullarySelector("retain", CGM.getContext());
@@ -325,34 +745,21 @@ CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
// Get functions needed in GC mode
// id objc_assign_ivar(id, id, ptrdiff_t);
- std::vector<const llvm::Type*> Args(1, IdTy);
- Args.push_back(PtrToIdTy);
- Args.push_back(PtrDiffTy);
- llvm::FunctionType *FTy = llvm::FunctionType::get(IdTy, Args, false);
- IvarAssignFn = CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
+ IvarAssignFn.init(&CGM, "objc_assign_ivar", IdTy, IdTy, IdTy, PtrDiffTy,
+ NULL);
// id objc_assign_strongCast (id, id*)
- Args.pop_back();
- FTy = llvm::FunctionType::get(IdTy, Args, false);
- StrongCastAssignFn =
- CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast");
+ StrongCastAssignFn.init(&CGM, "objc_assign_strongCast", IdTy, IdTy,
+ PtrToIdTy, NULL);
// id objc_assign_global(id, id*);
- FTy = llvm::FunctionType::get(IdTy, Args, false);
- GlobalAssignFn = CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
+ GlobalAssignFn.init(&CGM, "objc_assign_global", IdTy, IdTy, PtrToIdTy,
+ NULL);
// id objc_assign_weak(id, id*);
- FTy = llvm::FunctionType::get(IdTy, Args, false);
- WeakAssignFn = CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");
+ WeakAssignFn.init(&CGM, "objc_assign_weak", IdTy, IdTy, PtrToIdTy, NULL);
// id objc_read_weak(id*);
- Args.clear();
- Args.push_back(PtrToIdTy);
- FTy = llvm::FunctionType::get(IdTy, Args, false);
- WeakReadFn = CGM.CreateRuntimeFunction(FTy, "objc_read_weak");
+ WeakReadFn.init(&CGM, "objc_read_weak", IdTy, PtrToIdTy, NULL);
// void *objc_memmove_collectable(void*, void *, size_t);
- Args.clear();
- Args.push_back(PtrToInt8Ty);
- Args.push_back(PtrToInt8Ty);
- Args.push_back(SizeTy);
- FTy = llvm::FunctionType::get(IdTy, Args, false);
- MemMoveFn = CGM.CreateRuntimeFunction(FTy, "objc_memmove_collectable");
+ MemMoveFn.init(&CGM, "objc_memmove_collectable", PtrTy, PtrTy, PtrTy,
+ SizeTy, NULL);
}
}
@@ -378,79 +785,127 @@ llvm::Value *CGObjCGNU::GetClass(CGBuilderTy &Builder,
}
llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel,
+ const std::string &TypeEncoding, bool lval) {
+
+ llvm::SmallVector<TypedSelector, 2> &Types = SelectorTable[Sel];
+ llvm::GlobalAlias *SelValue = 0;
+
+
+ for (llvm::SmallVectorImpl<TypedSelector>::iterator i = Types.begin(),
+ e = Types.end() ; i!=e ; i++) {
+ if (i->first == TypeEncoding) {
+ SelValue = i->second;
+ break;
+ }
+ }
+ if (0 == SelValue) {
+ SelValue = new llvm::GlobalAlias(SelectorTy,
+ llvm::GlobalValue::PrivateLinkage,
+ ".objc_selector_"+Sel.getAsString(), NULL,
+ &TheModule);
+ Types.push_back(TypedSelector(TypeEncoding, SelValue));
+ }
+
+ if (lval) {
+ llvm::Value *tmp = Builder.CreateAlloca(SelValue->getType());
+ Builder.CreateStore(SelValue, tmp);
+ return tmp;
+ }
+ return SelValue;
+}
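+// Sketch of the placeholder's lifecycle for @selector(bar:): the alias
+//   @.objc_selector_bar: = private alias <placeholder>
+// accumulates uses during codegen, and ModuleInitFunction() later replaces
+// every use with a pointer into .objc_selector_list and erases the alias.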
+
+llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel,
bool lval) {
- llvm::GlobalAlias *&US = UntypedSelectors[Sel.getAsString()];
- if (US == 0)
- US = new llvm::GlobalAlias(llvm::PointerType::getUnqual(SelectorTy),
- llvm::GlobalValue::PrivateLinkage,
- ".objc_untyped_selector_alias"+Sel.getAsString(),
- NULL, &TheModule);
- if (lval)
- return US;
- return Builder.CreateLoad(US);
+ return GetSelector(Builder, Sel, std::string(), lval);
}
llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
*Method) {
-
- std::string SelName = Method->getSelector().getAsString();
std::string SelTypes;
CGM.getContext().getObjCEncodingForMethodDecl(Method, SelTypes);
- // Typed selectors
- TypedSelector Selector = TypedSelector(SelName,
- SelTypes);
-
- // If it's already cached, return it.
- if (TypedSelectors[Selector]) {
- return Builder.CreateLoad(TypedSelectors[Selector]);
- }
-
- // If it isn't, cache it.
- llvm::GlobalAlias *Sel = new llvm::GlobalAlias(
- llvm::PointerType::getUnqual(SelectorTy),
- llvm::GlobalValue::PrivateLinkage, ".objc_selector_alias" + SelName,
- NULL, &TheModule);
- TypedSelectors[Selector] = Sel;
-
- return Builder.CreateLoad(Sel);
+ return GetSelector(Builder, Method->getSelector(), SelTypes, false);
}
llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
- llvm_unreachable("asking for catch type for ObjC type in GNU runtime");
- return 0;
-}
+ if (!CGM.getLangOptions().CPlusPlus) {
+ if (T->isObjCIdType()
+ || T->isObjCQualifiedIdType()) {
+      // With the old ABI, there was only one kind of catchall, which broke
+      // foreign exceptions. With the new ABI, we use __objc_id_type_info as
+      // a pointer indicating object catchalls, and NULL to indicate real
+      // catchalls.
+ if (CGM.getLangOptions().ObjCNonFragileABI) {
+ return MakeConstantString("@id");
+ } else {
+ return 0;
+ }
+ }
-llvm::Constant *CGObjCGNU::MakeConstantString(const std::string &Str,
- const std::string &Name) {
- llvm::Constant *ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
- return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros, 2);
-}
-llvm::Constant *CGObjCGNU::ExportUniqueString(const std::string &Str,
- const std::string prefix) {
- std::string name = prefix + Str;
- llvm::Constant *ConstStr = TheModule.getGlobalVariable(name);
- if (!ConstStr) {
- llvm::Constant *value = llvm::ConstantArray::get(VMContext, Str, true);
- ConstStr = new llvm::GlobalVariable(TheModule, value->getType(), true,
- llvm::GlobalValue::LinkOnceODRLinkage, value, prefix + Str);
+ // All other types should be Objective-C interface pointer types.
+ const ObjCObjectPointerType *OPT =
+ T->getAs<ObjCObjectPointerType>();
+ assert(OPT && "Invalid @catch type.");
+ const ObjCInterfaceDecl *IDecl =
+ OPT->getObjectType()->getInterface();
+ assert(IDecl && "Invalid @catch type.");
+ return MakeConstantString(IDecl->getIdentifier()->getName());
+ }
+ // For Objective-C++, we want to provide the ability to catch both C++ and
+ // Objective-C objects in the same function.
+
+ // There's a particular fixed type info for 'id'.
+ if (T->isObjCIdType() ||
+ T->isObjCQualifiedIdType()) {
+ llvm::Constant *IDEHType =
+ CGM.getModule().getGlobalVariable("__objc_id_type_info");
+ if (!IDEHType)
+ IDEHType =
+ new llvm::GlobalVariable(CGM.getModule(), PtrToInt8Ty,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, "__objc_id_type_info");
+ return llvm::ConstantExpr::getBitCast(IDEHType, PtrToInt8Ty);
}
- return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros, 2);
-}
-
-llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::StructType *Ty,
- std::vector<llvm::Constant*> &V, llvm::StringRef Name,
- llvm::GlobalValue::LinkageTypes linkage) {
- llvm::Constant *C = llvm::ConstantStruct::get(Ty, V);
- return new llvm::GlobalVariable(TheModule, Ty, false,
- linkage, C, Name);
-}
-llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::ArrayType *Ty,
- std::vector<llvm::Constant*> &V, llvm::StringRef Name,
- llvm::GlobalValue::LinkageTypes linkage) {
- llvm::Constant *C = llvm::ConstantArray::get(Ty, V);
- return new llvm::GlobalVariable(TheModule, Ty, false,
- linkage, C, Name);
+ const ObjCObjectPointerType *PT =
+ T->getAs<ObjCObjectPointerType>();
+ assert(PT && "Invalid @catch type.");
+ const ObjCInterfaceType *IT = PT->getInterfaceType();
+ assert(IT && "Invalid @catch type.");
+ std::string className = IT->getDecl()->getIdentifier()->getName();
+
+ std::string typeinfoName = "__objc_eh_typeinfo_" + className;
+
+ // Return the existing typeinfo if it exists
+ llvm::Constant *typeinfo = TheModule.getGlobalVariable(typeinfoName);
+ if (typeinfo) return typeinfo;
+
+ // Otherwise create it.
+
+ // vtable for gnustep::libobjc::__objc_class_type_info
+ // It's quite ugly hard-coding this. Ideally we'd generate it using the host
+ // platform's name mangling.
+ const char *vtableName = "_ZTVN7gnustep7libobjc22__objc_class_type_infoE";
+ llvm::Constant *Vtable = TheModule.getGlobalVariable(vtableName);
+ if (!Vtable) {
+ Vtable = new llvm::GlobalVariable(TheModule, PtrToInt8Ty, true,
+ llvm::GlobalValue::ExternalLinkage, 0, vtableName);
+ }
+ llvm::Constant *Two = llvm::ConstantInt::get(IntTy, 2);
+ Vtable = llvm::ConstantExpr::getGetElementPtr(Vtable, &Two, 1);
+ Vtable = llvm::ConstantExpr::getBitCast(Vtable, PtrToInt8Ty);
+
+ llvm::Constant *typeName =
+ ExportUniqueString(className, "__objc_eh_typename_");
+
+ std::vector<llvm::Constant*> fields;
+ fields.push_back(Vtable);
+ fields.push_back(typeName);
+ llvm::Constant *TI =
+ MakeGlobal(llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty,
+ NULL), fields, "__objc_eh_typeinfo_" + className,
+ llvm::GlobalValue::LinkOnceODRLinkage);
+ return llvm::ConstantExpr::getBitCast(TI, PtrToInt8Ty);
}
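+// The typeinfo emitted above for a class Foo looks roughly like:
+//   @__objc_eh_typeinfo_Foo = linkonce_odr global { i8*, i8* } {
+//     i8* <vtable of gnustep::libobjc::__objc_class_type_info, offset by 2>,
+//     i8* @__objc_eh_typename_Foo }
+// mirroring the C++ ABI layout of a std::type_info (vtable pointer + name).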
/// Generate an NSConstantString object.
@@ -479,8 +934,8 @@ llvm::Constant *CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
///Generates a message send where the super is the receiver. This is a message
///send to self with special delivery semantics indicating which class's method
///should be called.
-CodeGen::RValue
-CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+RValue
+CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
ReturnValueSlot Return,
QualType ResultType,
Selector Sel,
@@ -505,18 +960,13 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
CallArgList ActualArgs;
- ActualArgs.push_back(
- std::make_pair(RValue::get(Builder.CreateBitCast(Receiver, IdTy)),
- ASTIdTy));
- ActualArgs.push_back(std::make_pair(RValue::get(cmd),
- CGF.getContext().getObjCSelType()));
+ ActualArgs.add(RValue::get(EnforceType(Builder, Receiver, IdTy)), ASTIdTy);
+ ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType());
ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
FunctionType::ExtInfo());
- const llvm::FunctionType *impType =
- Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
llvm::Value *ReceiverClass = 0;
if (isCategoryImpl) {
@@ -570,43 +1020,20 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
Builder.CreateStore(Receiver, Builder.CreateStructGEP(ObjCSuper, 0));
Builder.CreateStore(ReceiverClass, Builder.CreateStructGEP(ObjCSuper, 1));
- // Get the IMP
- std::vector<const llvm::Type*> Params;
- Params.push_back(llvm::PointerType::getUnqual(ObjCSuperTy));
- Params.push_back(SelectorTy);
-
- llvm::Value *lookupArgs[] = {ObjCSuper, cmd};
- llvm::Value *imp;
-
- if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
- // The lookup function returns a slot, which can be safely cached.
- llvm::Type *SlotTy = llvm::StructType::get(VMContext, PtrTy, PtrTy, PtrTy,
- IntTy, llvm::PointerType::getUnqual(impType), NULL);
-
- llvm::Constant *lookupFunction =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- llvm::PointerType::getUnqual(SlotTy), Params, true),
- "objc_slot_lookup_super");
-
- llvm::CallInst *slot = Builder.CreateCall(lookupFunction, lookupArgs,
- lookupArgs+2);
- slot->setOnlyReadsMemory();
+ ObjCSuper = EnforceType(Builder, ObjCSuper, PtrToObjCSuperTy);
+ const llvm::FunctionType *impType =
+ Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
- imp = Builder.CreateLoad(Builder.CreateStructGEP(slot, 4));
- } else {
- llvm::Constant *lookupFunction =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- llvm::PointerType::getUnqual(impType), Params, true),
- "objc_msg_lookup_super");
- imp = Builder.CreateCall(lookupFunction, lookupArgs, lookupArgs+2);
- }
+ // Get the IMP
+ llvm::Value *imp = LookupIMPSuper(CGF, ObjCSuper, cmd);
+ imp = EnforceType(Builder, imp, llvm::PointerType::getUnqual(impType));
llvm::Value *impMD[] = {
llvm::MDString::get(VMContext, Sel.getAsString()),
llvm::MDString::get(VMContext, Class->getSuperClass()->getNameAsString()),
llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), IsClassMessage)
};
- llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD, 3);
+ llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD);
llvm::Instruction *call;
RValue msgRet = CGF.EmitCall(FnInfo, imp, Return, ActualArgs,
@@ -616,8 +1043,8 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
}
/// Generate code for a message send expression.
-CodeGen::RValue
-CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+RValue
+CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
ReturnValueSlot Return,
QualType ResultType,
Selector Sel,
@@ -646,11 +1073,10 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
// paragraph and insist on sending messages to nil that have structure
// returns. With GCC, this generates a random return value (whatever happens
// to be on the stack / in those registers at the time) on most platforms,
- // and generates a SegV on SPARC. With LLVM it corrupts the stack.
- bool isPointerSizedReturn = false;
- if (ResultType->isAnyPointerType() ||
- ResultType->isIntegralOrEnumerationType() || ResultType->isVoidType())
- isPointerSizedReturn = true;
+ // and generates an illegal instruction trap on SPARC. With LLVM it corrupts
+ // the stack.
+ bool isPointerSizedReturn = (ResultType->isAnyPointerType() ||
+ ResultType->isIntegralOrEnumerationType() || ResultType->isVoidType());
llvm::BasicBlock *startBB = 0;
llvm::BasicBlock *messageBB = 0;
@@ -673,13 +1099,22 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
cmd = GetSelector(Builder, Method);
else
cmd = GetSelector(Builder, Sel);
- CallArgList ActualArgs;
+ cmd = EnforceType(Builder, cmd, SelectorTy);
+ Receiver = EnforceType(Builder, Receiver, IdTy);
- Receiver = Builder.CreateBitCast(Receiver, IdTy);
- ActualArgs.push_back(
- std::make_pair(RValue::get(Receiver), ASTIdTy));
- ActualArgs.push_back(std::make_pair(RValue::get(cmd),
- CGF.getContext().getObjCSelType()));
+ llvm::Value *impMD[] = {
+ llvm::MDString::get(VMContext, Sel.getAsString()),
+ llvm::MDString::get(VMContext, Class ? Class->getNameAsString() :""),
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), Class!=0)
+ };
+ llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD);
+
+ // Get the IMP to call
+ llvm::Value *imp = LookupIMP(CGF, Receiver, cmd, node);
+
+ CallArgList ActualArgs;
+ ActualArgs.add(RValue::get(Receiver), ASTIdTy);
+ ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType());
ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
CodeGenTypes &Types = CGM.getTypes();
@@ -687,72 +1122,12 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
FunctionType::ExtInfo());
const llvm::FunctionType *impType =
Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
-
- llvm::Value *impMD[] = {
- llvm::MDString::get(VMContext, Sel.getAsString()),
- llvm::MDString::get(VMContext, Class ? Class->getNameAsString() :""),
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), Class!=0)
- };
- llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD, 3);
+ imp = EnforceType(Builder, imp, llvm::PointerType::getUnqual(impType));
- llvm::Value *imp;
// For sender-aware dispatch, we pass the sender as the third argument to a
// lookup function. When sending messages from C code, the sender is nil.
// objc_msg_lookup_sender(id *receiver, SEL selector, id sender);
- if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
-
- std::vector<const llvm::Type*> Params;
- llvm::Value *ReceiverPtr = CGF.CreateTempAlloca(Receiver->getType());
- Builder.CreateStore(Receiver, ReceiverPtr);
- Params.push_back(ReceiverPtr->getType());
- Params.push_back(SelectorTy);
- llvm::Value *self;
-
- if (isa<ObjCMethodDecl>(CGF.CurCodeDecl)) {
- self = CGF.LoadObjCSelf();
- } else {
- self = llvm::ConstantPointerNull::get(IdTy);
- }
-
- Params.push_back(self->getType());
-
- // The lookup function returns a slot, which can be safely cached.
- llvm::Type *SlotTy = llvm::StructType::get(VMContext, PtrTy, PtrTy, PtrTy,
- IntTy, llvm::PointerType::getUnqual(impType), NULL);
- llvm::Constant *lookupFunction =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- llvm::PointerType::getUnqual(SlotTy), Params, true),
- "objc_msg_lookup_sender");
-
- // The lookup function is guaranteed not to capture the receiver pointer.
- if (llvm::Function *LookupFn = dyn_cast<llvm::Function>(lookupFunction)) {
- LookupFn->setDoesNotCapture(1);
- }
-
- llvm::CallInst *slot =
- Builder.CreateCall3(lookupFunction, ReceiverPtr, cmd, self);
- slot->setOnlyReadsMemory();
- slot->setMetadata(msgSendMDKind, node);
-
- imp = Builder.CreateLoad(Builder.CreateStructGEP(slot, 4));
-
- // The lookup function may have changed the receiver, so make sure we use
- // the new one.
- ActualArgs[0] = std::make_pair(RValue::get(
- Builder.CreateLoad(ReceiverPtr, true)), ASTIdTy);
- } else {
- std::vector<const llvm::Type*> Params;
- Params.push_back(Receiver->getType());
- Params.push_back(SelectorTy);
- llvm::Constant *lookupFunction =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- llvm::PointerType::getUnqual(impType), Params, true),
- "objc_msg_lookup");
-
- imp = Builder.CreateCall2(lookupFunction, Receiver, cmd);
- cast<llvm::CallInst>(imp)->setMetadata(msgSendMDKind, node);
- }
llvm::Instruction *call;
RValue msgRet = CGF.EmitCall(FnInfo, imp, Return, ActualArgs,
0, &call);
@@ -765,13 +1140,13 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
CGF.EmitBlock(continueBB);
if (msgRet.isScalar()) {
llvm::Value *v = msgRet.getScalarVal();
- llvm::PHINode *phi = Builder.CreatePHI(v->getType());
+ llvm::PHINode *phi = Builder.CreatePHI(v->getType(), 2);
phi->addIncoming(v, messageBB);
phi->addIncoming(llvm::Constant::getNullValue(v->getType()), startBB);
msgRet = RValue::get(phi);
} else if (msgRet.isAggregate()) {
llvm::Value *v = msgRet.getAggregateAddr();
- llvm::PHINode *phi = Builder.CreatePHI(v->getType());
+ llvm::PHINode *phi = Builder.CreatePHI(v->getType(), 2);
const llvm::PointerType *RetTy = cast<llvm::PointerType>(v->getType());
llvm::AllocaInst *NullVal =
CGF.CreateTempAlloca(RetTy->getElementType(), "null");
@@ -782,11 +1157,11 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
msgRet = RValue::getAggregate(phi);
} else /* isComplex() */ {
std::pair<llvm::Value*,llvm::Value*> v = msgRet.getComplexVal();
- llvm::PHINode *phi = Builder.CreatePHI(v.first->getType());
+ llvm::PHINode *phi = Builder.CreatePHI(v.first->getType(), 2);
phi->addIncoming(v.first, messageBB);
phi->addIncoming(llvm::Constant::getNullValue(v.first->getType()),
startBB);
- llvm::PHINode *phi2 = Builder.CreatePHI(v.second->getType());
+ llvm::PHINode *phi2 = Builder.CreatePHI(v.second->getType(), 2);
phi2->addIncoming(v.second, messageBB);
phi2->addIncoming(llvm::Constant::getNullValue(v.second->getType()),
startBB);
@@ -798,8 +1173,8 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
/// Generates a MethodList. Used in construction of a objc_class and
/// objc_category structures.
-llvm::Constant *CGObjCGNU::GenerateMethodList(const std::string &ClassName,
- const std::string &CategoryName,
+llvm::Constant *CGObjCGNU::GenerateMethodList(const llvm::StringRef &ClassName,
+ const llvm::StringRef &CategoryName,
const llvm::SmallVectorImpl<Selector> &MethodSels,
const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
bool isClassMethodList) {
@@ -809,24 +1184,24 @@ llvm::Constant *CGObjCGNU::GenerateMethodList(const std::string &ClassName,
llvm::StructType *ObjCMethodTy = llvm::StructType::get(VMContext,
      PtrToInt8Ty, // Really a selector, but the runtime creates it for us.
PtrToInt8Ty, // Method types
- llvm::PointerType::getUnqual(IMPTy), //Method pointer
+ IMPTy, //Method pointer
NULL);
std::vector<llvm::Constant*> Methods;
std::vector<llvm::Constant*> Elements;
for (unsigned int i = 0, e = MethodTypes.size(); i < e; ++i) {
Elements.clear();
- if (llvm::Constant *Method =
+ llvm::Constant *Method =
TheModule.getFunction(SymbolNameForMethod(ClassName, CategoryName,
- MethodSels[i].getAsString(),
- isClassMethodList))) {
- llvm::Constant *C = MakeConstantString(MethodSels[i].getAsString());
- Elements.push_back(C);
- Elements.push_back(MethodTypes[i]);
- Method = llvm::ConstantExpr::getBitCast(Method,
- llvm::PointerType::getUnqual(IMPTy));
- Elements.push_back(Method);
- Methods.push_back(llvm::ConstantStruct::get(ObjCMethodTy, Elements));
- }
+ MethodSels[i],
+ isClassMethodList));
+ assert(Method && "Can't generate metadata for method that doesn't exist");
+ llvm::Constant *C = MakeConstantString(MethodSels[i].getAsString());
+ Elements.push_back(C);
+ Elements.push_back(MethodTypes[i]);
+ Method = llvm::ConstantExpr::getBitCast(Method,
+ IMPTy);
+ Elements.push_back(Method);
+ Methods.push_back(llvm::ConstantStruct::get(ObjCMethodTy, Elements));
}
// Array of method structures
@@ -951,8 +1326,10 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
Elements.push_back(llvm::ConstantInt::get(LongTy, info));
if (isMeta) {
llvm::TargetData td(&TheModule);
- Elements.push_back(llvm::ConstantInt::get(LongTy,
- td.getTypeSizeInBits(ClassTy)/8));
+ Elements.push_back(
+ llvm::ConstantInt::get(LongTy,
+ td.getTypeSizeInBits(ClassTy) /
+ CGM.getContext().getCharWidth()));
} else
Elements.push_back(InstanceSize);
Elements.push_back(IVars);
@@ -1063,10 +1440,9 @@ llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
std::vector<llvm::Constant*> Elements;
// The isa pointer must be set to a magic number so the runtime knows it's
// the correct layout.
- int Version = CGM.getContext().getLangOptions().ObjCNonFragileABI ?
- NonFragileProtocolVersion : ProtocolVersion;
Elements.push_back(llvm::ConstantExpr::getIntToPtr(
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Version), IdTy));
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ ProtocolVersion), IdTy));
Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
Elements.push_back(ProtocolList);
Elements.push_back(MethodList);
@@ -1225,10 +1601,9 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
std::vector<llvm::Constant*> Elements;
// The isa pointer must be set to a magic number so the runtime knows it's
// the correct layout.
- int Version = CGM.getContext().getLangOptions().ObjCNonFragileABI ?
- NonFragileProtocolVersion : ProtocolVersion;
Elements.push_back(llvm::ConstantExpr::getIntToPtr(
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Version), IdTy));
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ ProtocolVersion), IdTy));
Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
Elements.push_back(ProtocolList);
Elements.push_back(InstanceMethodList);
@@ -1486,13 +1861,9 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
"__objc_ivar_offset_value_" + ClassName +"." +
IVD->getNameAsString()));
}
- llvm::Constant *IvarOffsetArrayInit =
- llvm::ConstantArray::get(llvm::ArrayType::get(PtrToIntTy,
- IvarOffsetValues.size()), IvarOffsetValues);
- llvm::GlobalVariable *IvarOffsetArray = new llvm::GlobalVariable(TheModule,
- IvarOffsetArrayInit->getType(), false,
- llvm::GlobalValue::InternalLinkage, IvarOffsetArrayInit,
- ".ivar.offsets");
+ llvm::GlobalVariable *IvarOffsetArray =
+ MakeGlobalArray(PtrToIntTy, IvarOffsetValues, ".ivar.offsets");
+
// Collect information about instance methods
llvm::SmallVector<Selector, 16> InstanceMethodSels;
@@ -1620,8 +1991,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
llvm::Function *CGObjCGNU::ModuleInitFunction() {
// Only emit an ObjC load function if no Objective-C stuff has been called
if (Classes.empty() && Categories.empty() && ConstantStrings.empty() &&
- ExistingProtocols.empty() && TypedSelectors.empty() &&
- UntypedSelectors.empty())
+ ExistingProtocols.empty() && SelectorTable.empty())
return NULL;
// Add all referenced protocols to a category.
@@ -1630,12 +2000,10 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
const llvm::StructType *SelStructTy = dyn_cast<llvm::StructType>(
SelectorTy->getElementType());
const llvm::Type *SelStructPtrTy = SelectorTy;
- bool isSelOpaque = false;
if (SelStructTy == 0) {
SelStructTy = llvm::StructType::get(VMContext, PtrToInt8Ty,
PtrToInt8Ty, NULL);
SelStructPtrTy = llvm::PointerType::getUnqual(SelStructTy);
- isSelOpaque = true;
}
// Name the ObjC types to make the IR a bit easier to read
@@ -1652,7 +2020,9 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
ConstantStrings.push_back(NULLPtr);
llvm::StringRef StringClass = CGM.getLangOptions().ObjCConstantStringClass;
+
if (StringClass.empty()) StringClass = "NXConstantString";
+
Elements.push_back(MakeConstantString(StringClass,
".objc_static_class_name"));
Elements.push_back(llvm::ConstantArray::get(StaticsArrayTy,
@@ -1682,75 +2052,62 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
Elements.clear();
// Pointer to an array of selectors used in this module.
std::vector<llvm::Constant*> Selectors;
- for (std::map<TypedSelector, llvm::GlobalAlias*>::iterator
- iter = TypedSelectors.begin(), iterEnd = TypedSelectors.end();
- iter != iterEnd ; ++iter) {
- Elements.push_back(ExportUniqueString(iter->first.first, ".objc_sel_name"));
- Elements.push_back(MakeConstantString(iter->first.second,
- ".objc_sel_types"));
- Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
- Elements.clear();
- }
- for (llvm::StringMap<llvm::GlobalAlias*>::iterator
- iter = UntypedSelectors.begin(), iterEnd = UntypedSelectors.end();
- iter != iterEnd; ++iter) {
- Elements.push_back(
- ExportUniqueString(iter->getKeyData(), ".objc_sel_name"));
- Elements.push_back(NULLPtr);
- Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
- Elements.clear();
+ std::vector<llvm::GlobalAlias*> SelectorAliases;
+ for (SelectorMap::iterator iter = SelectorTable.begin(),
+ iterEnd = SelectorTable.end(); iter != iterEnd ; ++iter) {
+
+ std::string SelNameStr = iter->first.getAsString();
+ llvm::Constant *SelName = ExportUniqueString(SelNameStr, ".objc_sel_name");
+
+ llvm::SmallVectorImpl<TypedSelector> &Types = iter->second;
+ for (llvm::SmallVectorImpl<TypedSelector>::iterator i = Types.begin(),
+ e = Types.end() ; i!=e ; i++) {
+
+ llvm::Constant *SelectorTypeEncoding = NULLPtr;
+ if (!i->first.empty())
+ SelectorTypeEncoding = MakeConstantString(i->first, ".objc_sel_types");
+
+ Elements.push_back(SelName);
+ Elements.push_back(SelectorTypeEncoding);
+ Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+ Elements.clear();
+
+ // Store the selector alias for later replacement
+ SelectorAliases.push_back(i->second);
+ }
}
+ unsigned SelectorCount = Selectors.size();
+ // NULL-terminate the selector list. This should not actually be required,
+ // because the selector list has a length field. Unfortunately, the GCC
+ // runtime decides to ignore the length field and expects a NULL terminator,
+ // and GCC cooperates with this by always setting the length to 0.
Elements.push_back(NULLPtr);
Elements.push_back(NULLPtr);
Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
Elements.clear();
+
// Number of static selectors
- Elements.push_back(llvm::ConstantInt::get(LongTy, Selectors.size() ));
- llvm::Constant *SelectorList = MakeGlobal(
- llvm::ArrayType::get(SelStructTy, Selectors.size()), Selectors,
+ Elements.push_back(llvm::ConstantInt::get(LongTy, SelectorCount));
+ llvm::Constant *SelectorList = MakeGlobalArray(SelStructTy, Selectors,
".objc_selector_list");
Elements.push_back(llvm::ConstantExpr::getBitCast(SelectorList,
SelStructPtrTy));
// Now that all of the static selectors exist, create pointers to them.
- int index = 0;
- for (std::map<TypedSelector, llvm::GlobalAlias*>::iterator
- iter=TypedSelectors.begin(), iterEnd =TypedSelectors.end();
- iter != iterEnd; ++iter) {
+ for (unsigned int i=0 ; i<SelectorCount ; i++) {
+
llvm::Constant *Idxs[] = {Zeros[0],
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), index++), Zeros[0]};
- llvm::Constant *SelPtr = new llvm::GlobalVariable(TheModule, SelStructPtrTy,
- true, llvm::GlobalValue::LinkOnceODRLinkage,
- llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
- MangleSelectorTypes(".objc_sel_ptr"+iter->first.first+"."+
- iter->first.second));
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), i), Zeros[0]};
+ // FIXME: We're generating redundant loads and stores here!
+ llvm::Constant *SelPtr = llvm::ConstantExpr::getGetElementPtr(SelectorList,
+ Idxs, 2);
// If selectors are defined as an opaque type, cast the pointer to this
// type.
- if (isSelOpaque) {
- SelPtr = llvm::ConstantExpr::getBitCast(SelPtr,
- llvm::PointerType::getUnqual(SelectorTy));
- }
- (*iter).second->replaceAllUsesWith(SelPtr);
- (*iter).second->eraseFromParent();
- }
- for (llvm::StringMap<llvm::GlobalAlias*>::iterator
- iter=UntypedSelectors.begin(), iterEnd = UntypedSelectors.end();
- iter != iterEnd; iter++) {
- llvm::Constant *Idxs[] = {Zeros[0],
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), index++), Zeros[0]};
- llvm::Constant *SelPtr = new llvm::GlobalVariable(TheModule, SelStructPtrTy,
- true, llvm::GlobalValue::LinkOnceODRLinkage,
- llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
- MangleSelectorTypes(std::string(".objc_sel_ptr")+iter->getKey().str()));
- // If selectors are defined as an opaque type, cast the pointer to this
- // type.
- if (isSelOpaque) {
- SelPtr = llvm::ConstantExpr::getBitCast(SelPtr,
- llvm::PointerType::getUnqual(SelectorTy));
- }
- (*iter).second->replaceAllUsesWith(SelPtr);
- (*iter).second->eraseFromParent();
+ SelPtr = llvm::ConstantExpr::getBitCast(SelPtr, SelectorTy);
+ SelectorAliases[i]->replaceAllUsesWith(SelPtr);
+ SelectorAliases[i]->eraseFromParent();
}
+
// Number of classes defined.
Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
Classes.size()));
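For reference, the selector metadata built in this hunk has roughly the following shape (a sketch only; the entry field names are illustrative and are not taken from the runtime headers):

  // One entry per (selector name, type encoding) pair emitted above.
  struct objc_selector_entry {
    const char *sel_name;   // shared .objc_sel_name string
    const char *sel_types;  // .objc_sel_types encoding, or NULL if untyped
  };
  // .objc_selector_list ~ { entry[0], ..., entry[N-1], { NULL, NULL } }
  // The trailing { NULL, NULL } pair is the terminator the GCC runtime
  // expects, even though the count N is also recorded separately.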
@@ -1772,19 +2129,22 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
llvm::StructType * ModuleTy = llvm::StructType::get(VMContext, LongTy, LongTy,
PtrToInt8Ty, llvm::PointerType::getUnqual(SymTabTy), NULL);
Elements.clear();
- // Runtime version used for compatibility checking.
- if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
- Elements.push_back(llvm::ConstantInt::get(LongTy,
- NonFragileRuntimeVersion));
- } else {
- Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion));
- }
+ // Runtime version, used for ABI compatibility checking.
+ Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion));
// sizeof(ModuleTy)
llvm::TargetData td(&TheModule);
- Elements.push_back(llvm::ConstantInt::get(LongTy,
- td.getTypeSizeInBits(ModuleTy)/8));
- //FIXME: Should be the path to the file where this module was declared
- Elements.push_back(NULLPtr);
+ Elements.push_back(
+ llvm::ConstantInt::get(LongTy,
+ td.getTypeSizeInBits(ModuleTy) /
+ CGM.getContext().getCharWidth()));
+
+ // The path to the source file where this module was declared
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ const FileEntry *mainFile = SM.getFileEntryForID(SM.getMainFileID());
+ std::string path =
+ std::string(mainFile->getDir()->getName()) + '/' + mainFile->getName();
+ Elements.push_back(MakeConstantString(path, ".objc_source_file_name"));
+
Elements.push_back(SymTab);
llvm::Value *Module = MakeGlobal(ModuleTy, Elements);
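Assuming ModuleTy mirrors the GNU runtime's module structure, the fields populated here correspond roughly to the following (an illustrative sketch, not the runtime's actual declaration):

  struct objc_module_sketch {
    long version;                // RuntimeVersion, for ABI compatibility checks
    long size;                   // sizeof(module), measured in chars above
    const char *name;            // path of the main source file, now emitted
    struct objc_symtab *symtab;  // classes, categories, static selector list
  };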
@@ -1813,9 +2173,9 @@ llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD) {
const ObjCCategoryImplDecl *OCD =
dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext());
- std::string CategoryName = OCD ? OCD->getNameAsString() : "";
- std::string ClassName = CD->getName();
- std::string MethodName = OMD->getSelector().getAsString();
+ llvm::StringRef CategoryName = OCD ? OCD->getName() : "";
+ llvm::StringRef ClassName = CD->getName();
+ Selector MethodName = OMD->getSelector();
bool isClassMethod = !OMD->isInstanceMethod();
CodeGenTypes &Types = CGM.getTypes();
@@ -1833,121 +2193,31 @@ llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
}
llvm::Function *CGObjCGNU::GetPropertyGetFunction() {
- std::vector<const llvm::Type*> Params;
- Params.push_back(IdTy);
- Params.push_back(SelectorTy);
- Params.push_back(SizeTy);
- Params.push_back(BoolTy);
- // void objc_getProperty (id, SEL, ptrdiff_t, bool)
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(IdTy, Params, false);
- return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
- "objc_getProperty"));
+ return GetPropertyFn;
}
llvm::Function *CGObjCGNU::GetPropertySetFunction() {
- std::vector<const llvm::Type*> Params;
- Params.push_back(IdTy);
- Params.push_back(SelectorTy);
- Params.push_back(SizeTy);
- Params.push_back(IdTy);
- Params.push_back(BoolTy);
- Params.push_back(BoolTy);
- // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false);
- return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
- "objc_setProperty"));
+ return SetPropertyFn;
}
llvm::Function *CGObjCGNU::GetGetStructFunction() {
- std::vector<const llvm::Type*> Params;
- Params.push_back(PtrTy);
- Params.push_back(PtrTy);
- Params.push_back(PtrDiffTy);
- Params.push_back(BoolTy);
- Params.push_back(BoolTy);
- // objc_setPropertyStruct (void*, void*, ptrdiff_t, BOOL, BOOL)
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false);
- return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
- "objc_getPropertyStruct"));
+ return GetStructPropertyFn;
}
llvm::Function *CGObjCGNU::GetSetStructFunction() {
- std::vector<const llvm::Type*> Params;
- Params.push_back(PtrTy);
- Params.push_back(PtrTy);
- Params.push_back(PtrDiffTy);
- Params.push_back(BoolTy);
- Params.push_back(BoolTy);
- // objc_setPropertyStruct (void*, void*, ptrdiff_t, BOOL, BOOL)
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false);
- return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
- "objc_setPropertyStruct"));
+ return SetStructPropertyFn;
}
llvm::Constant *CGObjCGNU::EnumerationMutationFunction() {
- CodeGen::CodeGenTypes &Types = CGM.getTypes();
- ASTContext &Ctx = CGM.getContext();
- // void objc_enumerationMutation (id)
- llvm::SmallVector<CanQualType,1> Params;
- Params.push_back(ASTIdTy);
- const llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
- FunctionType::ExtInfo()), false);
- return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
+ return EnumerationMutationFn;
}
-namespace {
- struct CallSyncExit : EHScopeStack::Cleanup {
- llvm::Value *SyncExitFn;
- llvm::Value *SyncArg;
- CallSyncExit(llvm::Value *SyncExitFn, llvm::Value *SyncArg)
- : SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}
-
- void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
- CGF.Builder.CreateCall(SyncExitFn, SyncArg)->setDoesNotThrow();
- }
- };
-}
-
-void CGObjCGNU::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+void CGObjCGNU::EmitSynchronizedStmt(CodeGenFunction &CGF,
const ObjCAtSynchronizedStmt &S) {
- std::vector<const llvm::Type*> Args(1, IdTy);
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
-
- // Evaluate the lock operand. This should dominate the cleanup.
- llvm::Value *SyncArg =
- CGF.EmitScalarExpr(S.getSynchExpr());
-
- // Acquire the lock.
- llvm::Value *SyncEnter = CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
- SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
- CGF.Builder.CreateCall(SyncEnter, SyncArg);
-
- // Register an all-paths cleanup to release the lock.
- llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
- CGF.EHStack.pushCleanup<CallSyncExit>(NormalAndEHCleanup, SyncExit, SyncArg);
-
- // Emit the body of the statement.
- CGF.EmitStmt(S.getSynchBody());
-
- // Pop the lock-release cleanup.
- CGF.PopCleanupBlock();
+ EmitAtSynchronizedStmt(CGF, S, SyncEnterFn, SyncExitFn);
}
-namespace {
- struct CatchHandler {
- const VarDecl *Variable;
- const Stmt *Body;
- llvm::BasicBlock *Block;
- llvm::Value *TypeInfo;
- };
-}
-void CGObjCGNU::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+void CGObjCGNU::EmitTryStmt(CodeGenFunction &CGF,
const ObjCAtTryStmt &S) {
// Unlike the Apple non-fragile runtimes, which also use
// unwind-based zero-cost exceptions, the GNU Objective-C runtime's
@@ -1957,127 +2227,17 @@ void CGObjCGNU::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
// catch handlers with calls to __blah_begin_catch/__blah_end_catch
// (or even _Unwind_DeleteException), but probably doesn't
// interoperate very well with foreign exceptions.
-
- // Jump destination for falling out of catch bodies.
- CodeGenFunction::JumpDest Cont;
- if (S.getNumCatchStmts())
- Cont = CGF.getJumpDestInCurrentScope("eh.cont");
-
- // We handle @finally statements by pushing them as a cleanup
- // before entering the catch.
- CodeGenFunction::FinallyInfo FinallyInfo;
- if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt()) {
- std::vector<const llvm::Type*> Args(1, IdTy);
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
- llvm::Constant *Rethrow =
- CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
-
- FinallyInfo = CGF.EnterFinallyBlock(Finally->getFinallyBody(), 0, 0,
- Rethrow);
- }
-
- llvm::SmallVector<CatchHandler, 8> Handlers;
-
- // Enter the catch, if there is one.
- if (S.getNumCatchStmts()) {
- for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
- const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
- const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
-
- Handlers.push_back(CatchHandler());
- CatchHandler &Handler = Handlers.back();
- Handler.Variable = CatchDecl;
- Handler.Body = CatchStmt->getCatchBody();
- Handler.Block = CGF.createBasicBlock("catch");
-
- // @catch() and @catch(id) both catch any ObjC exception.
- // Treat them as catch-alls.
- // FIXME: this is what this code was doing before, but should 'id'
- // really be catching foreign exceptions?
- if (!CatchDecl
- || CatchDecl->getType()->isObjCIdType()
- || CatchDecl->getType()->isObjCQualifiedIdType()) {
-
- Handler.TypeInfo = 0; // catch-all
-
- // Don't consider any other catches.
- break;
- }
-
- // All other types should be Objective-C interface pointer types.
- const ObjCObjectPointerType *OPT =
- CatchDecl->getType()->getAs<ObjCObjectPointerType>();
- assert(OPT && "Invalid @catch type.");
- const ObjCInterfaceDecl *IDecl =
- OPT->getObjectType()->getInterface();
- assert(IDecl && "Invalid @catch type.");
- Handler.TypeInfo = MakeConstantString(IDecl->getNameAsString());
- }
-
- EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
- for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
- Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
- }
-
- // Emit the try body.
- CGF.EmitStmt(S.getTryBody());
-
- // Leave the try.
- if (S.getNumCatchStmts())
- CGF.EHStack.popCatch();
-
- // Remember where we were.
- CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
-
- // Emit the handlers.
- for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
- CatchHandler &Handler = Handlers[I];
- CGF.EmitBlock(Handler.Block);
-
- llvm::Value *Exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot());
-
- // Bind the catch parameter if it exists.
- if (const VarDecl *CatchParam = Handler.Variable) {
- const llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
- Exn = CGF.Builder.CreateBitCast(Exn, CatchType);
-
- CGF.EmitAutoVarDecl(*CatchParam);
- CGF.Builder.CreateStore(Exn, CGF.GetAddrOfLocalVar(CatchParam));
- }
-
- CGF.ObjCEHValueStack.push_back(Exn);
- CGF.EmitStmt(Handler.Body);
- CGF.ObjCEHValueStack.pop_back();
-
- CGF.EmitBranchThroughCleanup(Cont);
- }
-
- // Go back to the try-statement fallthrough.
- CGF.Builder.restoreIP(SavedIP);
-
- // Pop out of the finally.
- if (S.getFinallyStmt())
- CGF.ExitFinallyBlock(FinallyInfo);
-
- if (Cont.isValid()) {
- if (Cont.getBlock()->use_empty())
- delete Cont.getBlock();
- else
- CGF.EmitBlock(Cont.getBlock());
- }
+ //
+ // In Objective-C++ mode, we actually emit something equivalent to the C++
+ // exception handler.
+ EmitTryCatchStmt(CGF, S, EnterCatchFn, ExitCatchFn, ExceptionReThrowFn);
+ return;
}
-void CGObjCGNU::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
const ObjCAtThrowStmt &S) {
llvm::Value *ExceptionAsObject;
- std::vector<const llvm::Type*> Args(1, IdTy);
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
- llvm::Value *ThrowFn =
- CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
-
if (const Expr *ThrowExpr = S.getThrowExpr()) {
llvm::Value *Exception = CGF.EmitScalarExpr(ThrowExpr);
ExceptionAsObject = Exception;
@@ -2100,24 +2260,24 @@ void CGObjCGNU::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
// as a result of stupidity.
llvm::BasicBlock *UnwindBB = CGF.getInvokeDest();
if (!UnwindBB) {
- CGF.Builder.CreateCall(ThrowFn, ExceptionAsObject);
+ CGF.Builder.CreateCall(ExceptionThrowFn, ExceptionAsObject);
CGF.Builder.CreateUnreachable();
} else {
- CGF.Builder.CreateInvoke(ThrowFn, UnwindBB, UnwindBB, &ExceptionAsObject,
+ CGF.Builder.CreateInvoke(ExceptionThrowFn, UnwindBB, UnwindBB, &ExceptionAsObject,
&ExceptionAsObject+1);
}
// Clear the insertion point to indicate we are in unreachable code.
CGF.Builder.ClearInsertionPoint();
}
-llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGenFunction &CGF,
llvm::Value *AddrWeakObj) {
CGBuilderTy B = CGF.Builder;
AddrWeakObj = EnforceType(B, AddrWeakObj, IdTy);
return B.CreateCall(WeakReadFn, AddrWeakObj);
}
-void CGObjCGNU::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+void CGObjCGNU::EmitObjCWeakAssign(CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst) {
CGBuilderTy B = CGF.Builder;
src = EnforceType(B, src, IdTy);
@@ -2125,7 +2285,7 @@ void CGObjCGNU::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
B.CreateCall2(WeakAssignFn, src, dst);
}
-void CGObjCGNU::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+void CGObjCGNU::EmitObjCGlobalAssign(CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst,
bool threadlocal) {
CGBuilderTy B = CGF.Builder;
@@ -2138,7 +2298,7 @@ void CGObjCGNU::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
assert(false && "EmitObjCGlobalAssign - Thread Local API NYI");
}
-void CGObjCGNU::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+void CGObjCGNU::EmitObjCIvarAssign(CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst,
llvm::Value *ivarOffset) {
CGBuilderTy B = CGF.Builder;
@@ -2147,7 +2307,7 @@ void CGObjCGNU::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
B.CreateCall3(IvarAssignFn, src, dst, ivarOffset);
}
-void CGObjCGNU::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+void CGObjCGNU::EmitObjCStrongCastAssign(CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst) {
CGBuilderTy B = CGF.Builder;
src = EnforceType(B, src, IdTy);
@@ -2155,7 +2315,7 @@ void CGObjCGNU::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
B.CreateCall2(StrongCastAssignFn, src, dst);
}
-void CGObjCGNU::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+void CGObjCGNU::EmitGCMemmoveCollectable(CodeGenFunction &CGF,
llvm::Value *DestPtr,
llvm::Value *SrcPtr,
llvm::Value *Size) {
@@ -2212,7 +2372,7 @@ llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
return IvarOffsetPointer;
}
-LValue CGObjCGNU::EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+LValue CGObjCGNU::EmitObjCValueForIvar(CodeGenFunction &CGF,
QualType ObjectTy,
llvm::Value *BaseValue,
const ObjCIvarDecl *Ivar,
@@ -2240,19 +2400,23 @@ static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
return 0;
}
-llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar) {
if (CGM.getLangOptions().ObjCNonFragileABI) {
Interface = FindIvarInterface(CGM.getContext(), Interface, Ivar);
- return CGF.Builder.CreateLoad(CGF.Builder.CreateLoad(
- ObjCIvarOffsetVariable(Interface, Ivar), false, "ivar"));
+ return CGF.Builder.CreateZExtOrBitCast(
+ CGF.Builder.CreateLoad(CGF.Builder.CreateLoad(
+ ObjCIvarOffsetVariable(Interface, Ivar), false, "ivar")),
+ PtrDiffTy);
}
uint64_t Offset = ComputeIvarBaseOffset(CGF.CGM, Interface, Ivar);
- return llvm::ConstantInt::get(LongTy, Offset, "ivar");
+ return llvm::ConstantInt::get(PtrDiffTy, Offset, "ivar");
}
-CodeGen::CGObjCRuntime *
-CodeGen::CreateGNUObjCRuntime(CodeGen::CodeGenModule &CGM) {
- return new CGObjCGNU(CGM);
+CGObjCRuntime *
+clang::CodeGen::CreateGNUObjCRuntime(CodeGenModule &CGM) {
+ if (CGM.getLangOptions().ObjCNonFragileABI)
+ return new CGObjCGNUstep(CGM);
+ return new CGObjCGCC(CGM);
}
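The EmitIvarOffset change means that under the non-fragile ABI the offset is now read through a per-ivar global and widened to ptrdiff_t, while the fragile ABI still bakes in a constant. In rough pseudo-C (a sketch of the emitted IR, not a real API):

  // Non-fragile ABI: two loads through the ivar-offset variable, then a
  // zero-extension, so both paths yield a ptrdiff_t.
  int **ivar_offset_var = /* ObjCIvarOffsetVariable(Interface, Ivar) */ 0;
  ptrdiff_t dynamic_offset = (ptrdiff_t)**ivar_offset_var;

  // Fragile ABI: the offset is a compile-time constant.
  ptrdiff_t static_offset = /* ComputeIvarBaseOffset(CGM, Interface, Ivar) */ 8;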
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 8dbd85f..2b1cfe3 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This provides Objective-C code generation targetting the Apple runtime.
+// This provides Objective-C code generation targeting the Apple runtime.
//
//===----------------------------------------------------------------------===//
@@ -42,9 +42,6 @@
using namespace clang;
using namespace CodeGen;
-// Common CGObjCRuntime functions, these don't belong here, but they
-// don't belong in CGObjCRuntime either so we will live with it for
-// now.
static void EmitNullReturnInitialization(CodeGenFunction &CGF,
ReturnValueSlot &returnSlot,
@@ -55,112 +52,6 @@ static void EmitNullReturnInitialization(CodeGenFunction &CGF,
CGF.EmitNullInitialization(returnSlot.getValue(), resultType);
}
-static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
- const ObjCInterfaceDecl *OID,
- const ObjCImplementationDecl *ID,
- const ObjCIvarDecl *Ivar) {
- const ObjCInterfaceDecl *Container = Ivar->getContainingInterface();
-
- // FIXME: We should eliminate the need to have ObjCImplementationDecl passed
- // in here; it should never be necessary because that should be the lexical
- // decl context for the ivar.
-
- // If we know have an implementation (and the ivar is in it) then
- // look up in the implementation layout.
- const ASTRecordLayout *RL;
- if (ID && ID->getClassInterface() == Container)
- RL = &CGM.getContext().getASTObjCImplementationLayout(ID);
- else
- RL = &CGM.getContext().getASTObjCInterfaceLayout(Container);
-
- // Compute field index.
- //
- // FIXME: The index here is closely tied to how ASTContext::getObjCLayout is
- // implemented. This should be fixed to get the information from the layout
- // directly.
- unsigned Index = 0;
- llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
- CGM.getContext().ShallowCollectObjCIvars(Container, Ivars);
- for (unsigned k = 0, e = Ivars.size(); k != e; ++k) {
- if (Ivar == Ivars[k])
- break;
- ++Index;
- }
- assert(Index != Ivars.size() && "Ivar is not inside container!");
- assert(Index < RL->getFieldCount() && "Ivar is not inside record layout!");
-
- return RL->getFieldOffset(Index);
-}
-
-uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
- const ObjCInterfaceDecl *OID,
- const ObjCIvarDecl *Ivar) {
- return LookupFieldBitOffset(CGM, OID, 0, Ivar) / 8;
-}
-
-uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
- const ObjCImplementationDecl *OID,
- const ObjCIvarDecl *Ivar) {
- return LookupFieldBitOffset(CGM, OID->getClassInterface(), OID, Ivar) / 8;
-}
-
-LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
- const ObjCInterfaceDecl *OID,
- llvm::Value *BaseValue,
- const ObjCIvarDecl *Ivar,
- unsigned CVRQualifiers,
- llvm::Value *Offset) {
- // Compute (type*) ( (char *) BaseValue + Offset)
- const llvm::Type *I8Ptr = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- QualType IvarTy = Ivar->getType();
- const llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
- llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, I8Ptr);
- V = CGF.Builder.CreateGEP(V, Offset, "add.ptr");
- V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
-
- if (!Ivar->isBitField()) {
- LValue LV = CGF.MakeAddrLValue(V, IvarTy);
- LV.getQuals().addCVRQualifiers(CVRQualifiers);
- return LV;
- }
-
- // We need to compute an access strategy for this bit-field. We are given the
- // offset to the first byte in the bit-field, the sub-byte offset is taken
- // from the original layout. We reuse the normal bit-field access strategy by
- // treating this as an access to a struct where the bit-field is in byte 0,
- // and adjust the containing type size as appropriate.
- //
- // FIXME: Note that currently we make a very conservative estimate of the
- // alignment of the bit-field, because (a) it is not clear what guarantees the
- // runtime makes us, and (b) we don't have a way to specify that the struct is
- // at an alignment plus offset.
- //
- // Note, there is a subtle invariant here: we can only call this routine on
- // non-synthesized ivars but we may be called for synthesized ivars. However,
- // a synthesized ivar can never be a bit-field, so this is safe.
- const ASTRecordLayout &RL =
- CGF.CGM.getContext().getASTObjCInterfaceLayout(OID);
- uint64_t TypeSizeInBits = CGF.CGM.getContext().toBits(RL.getSize());
- uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar);
- uint64_t BitOffset = FieldBitOffset % 8;
- uint64_t ContainingTypeAlign = 8;
- uint64_t ContainingTypeSize = TypeSizeInBits - (FieldBitOffset - BitOffset);
- uint64_t BitFieldSize =
- Ivar->getBitWidth()->EvaluateAsInt(CGF.getContext()).getZExtValue();
-
- // Allocate a new CGBitFieldInfo object to describe this access.
- //
- // FIXME: This is incredibly wasteful, these should be uniqued or part of some
- // layout object. However, this is blocked on other cleanups to the
- // Objective-C code, so for now we just live with allocating a bunch of these
- // objects.
- CGBitFieldInfo *Info = new (CGF.CGM.getContext()) CGBitFieldInfo(
- CGBitFieldInfo::MakeInfo(CGF.CGM.getTypes(), Ivar, BitOffset, BitFieldSize,
- ContainingTypeSize, ContainingTypeAlign));
-
- return LValue::MakeBitfield(V, *Info,
- IvarTy.getCVRQualifiers() | CVRQualifiers);
-}
///
@@ -328,7 +219,7 @@ public:
CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
Params.push_back(IdType);
Params.push_back(SelType);
- Params.push_back(Ctx.LongTy);
+ Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
Params.push_back(Ctx.BoolTy);
const llvm::FunctionType *FTy =
Types.GetFunctionType(Types.getFunctionInfo(IdType, Params,
@@ -346,7 +237,7 @@ public:
CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
Params.push_back(IdType);
Params.push_back(SelType);
- Params.push_back(Ctx.LongTy);
+ Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
Params.push_back(IdType);
Params.push_back(Ctx.BoolTy);
Params.push_back(Ctx.BoolTy);
@@ -1294,7 +1185,8 @@ private:
llvm::Value *Receiver,
QualType Arg0Ty,
bool IsSuper,
- const CallArgList &CallArgs);
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
/// GetClassGlobal - Return the global variable for the Objective-C
/// class of the given name.
@@ -1555,7 +1447,7 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
// Create and init a super structure; this is a (receiver, class)
// pair we will pass to objc_msgSendSuper.
llvm::Value *ObjCSuper =
- CGF.Builder.CreateAlloca(ObjCTypes.SuperTy, 0, "objc_super");
+ CGF.CreateTempAlloca(ObjCTypes.SuperTy, "objc_super");
llvm::Value *ReceiverAsObject =
CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
CGF.Builder.CreateStore(ReceiverAsObject,
@@ -1630,9 +1522,8 @@ CGObjCCommonMac::EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
CallArgList ActualArgs;
if (!IsSuper)
Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy, "tmp");
- ActualArgs.push_back(std::make_pair(RValue::get(Arg0), Arg0Ty));
- ActualArgs.push_back(std::make_pair(RValue::get(Sel),
- CGF.getContext().getObjCSelType()));
+ ActualArgs.add(RValue::get(Arg0), Arg0Ty);
+ ActualArgs.add(RValue::get(Sel), CGF.getContext().getObjCSelType());
ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
CodeGenTypes &Types = CGM.getTypes();
@@ -2177,6 +2068,8 @@ void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
4, true);
DefinedCategories.push_back(GV);
DefinedCategoryNames.insert(ExtName.str());
+ // Method definition entries must be cleared for the next implementation.
+ MethodDefinitions.clear();
}
// FIXME: Get from somewhere?
@@ -2304,6 +2197,8 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
else
GV = CreateMetadataVar(Name, Init, Section, 4, true);
DefinedClasses.push_back(GV);
+ // Method definition entries must be cleared for the next implementation.
+ MethodDefinitions.clear();
}
llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
@@ -3596,7 +3491,9 @@ llvm::Constant *CGObjCCommonMac::GetClassName(IdentifierInfo *Ident) {
Entry = CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
llvm::ConstantArray::get(VMContext,
Ident->getNameStart()),
- "__TEXT,__cstring,cstring_literals",
+ ((ObjCABI == 2) ?
+ "__TEXT,__objc_classname,cstring_literals" :
+ "__TEXT,__cstring,cstring_literals"),
1, true);
return getConstantGEP(VMContext, Entry, 0, 0);
@@ -3668,7 +3565,7 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
// Note that 'i' here is actually the field index inside RD of Field,
// although this dependency is hidden.
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
- FieldOffset = RL.getFieldOffset(i) / 8;
+ FieldOffset = RL.getFieldOffset(i) / ByteSizeInBits;
} else
FieldOffset = ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(Field));
@@ -3920,7 +3817,9 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayoutBitmap(std::string& BitMap) {
llvm::GlobalVariable * Entry =
CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
llvm::ConstantArray::get(VMContext, BitMap.c_str()),
- "__TEXT,__cstring,cstring_literals",
+ ((ObjCABI == 2) ?
+ "__TEXT,__objc_classname,cstring_literals" :
+ "__TEXT,__cstring,cstring_literals"),
1, true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -3999,7 +3898,9 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarName(Selector Sel) {
if (!Entry)
Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_NAME_",
llvm::ConstantArray::get(VMContext, Sel.getAsString()),
- "__TEXT,__cstring,cstring_literals",
+ ((ObjCABI == 2) ?
+ "__TEXT,__objc_methname,cstring_literals" :
+ "__TEXT,__cstring,cstring_literals"),
1, true);
return getConstantGEP(VMContext, Entry, 0, 0);
@@ -4019,7 +3920,9 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) {
if (!Entry)
Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
llvm::ConstantArray::get(VMContext, TypeStr),
- "__TEXT,__cstring,cstring_literals",
+ ((ObjCABI == 2) ?
+ "__TEXT,__objc_methtype,cstring_literals" :
+ "__TEXT,__cstring,cstring_literals"),
1, true);
return getConstantGEP(VMContext, Entry, 0, 0);
@@ -4035,7 +3938,9 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D) {
if (!Entry)
Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
llvm::ConstantArray::get(VMContext, TypeStr),
- "__TEXT,__cstring,cstring_literals",
+ ((ObjCABI == 2) ?
+ "__TEXT,__objc_methtype,cstring_literals" :
+ "__TEXT,__cstring,cstring_literals"),
1, true);
return getConstantGEP(VMContext, Entry, 0, 0);
@@ -4173,11 +4078,11 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// }
RecordDecl *RD = RecordDecl::Create(Ctx, TTK_Struct,
Ctx.getTranslationUnitDecl(),
- SourceLocation(),
+ SourceLocation(), SourceLocation(),
&Ctx.Idents.get("_objc_super"));
- RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
Ctx.getObjCIdType(), 0, 0, false));
- RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
Ctx.getObjCClassType(), 0, 0, false));
RD->completeDefinition();
@@ -4636,11 +4541,11 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// First the clang type for struct _message_ref_t
RecordDecl *RD = RecordDecl::Create(Ctx, TTK_Struct,
Ctx.getTranslationUnitDecl(),
- SourceLocation(),
+ SourceLocation(), SourceLocation(),
&Ctx.Idents.get("_message_ref_t"));
- RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
Ctx.VoidPtrTy, 0, 0, false));
- RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
Ctx.getObjCSelType(), 0, 0, false));
RD->completeDefinition();
@@ -4966,7 +4871,7 @@ void CGObjCNonFragileABIMac::GetClassSizeInfo(const ObjCImplementationDecl *OID,
if (!RL.getFieldCount())
InstanceStart = InstanceSize;
else
- InstanceStart = RL.getFieldOffset(0) / 8;
+ InstanceStart = RL.getFieldOffset(0) / CGM.getContext().getCharWidth();
}
void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
@@ -5017,14 +4922,14 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
Root = Super;
IsAGV = GetClassGlobal(ObjCMetaClassName + Root->getNameAsString());
- if (Root->hasAttr<WeakImportAttr>())
+ if (Root->isWeakImported())
IsAGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
// work on super class metadata symbol.
std::string SuperClassName =
ObjCMetaClassName +
ID->getClassInterface()->getSuperClass()->getNameAsString();
SuperClassGV = GetClassGlobal(SuperClassName);
- if (ID->getClassInterface()->getSuperClass()->hasAttr<WeakImportAttr>())
+ if (ID->getClassInterface()->getSuperClass()->isWeakImported())
SuperClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
}
llvm::GlobalVariable *CLASS_RO_GV = BuildClassRoTInitializer(flags,
@@ -5054,7 +4959,7 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
std::string RootClassName =
ID->getClassInterface()->getSuperClass()->getNameAsString();
SuperClassGV = GetClassGlobal(ObjCClassName + RootClassName);
- if (ID->getClassInterface()->getSuperClass()->hasAttr<WeakImportAttr>())
+ if (ID->getClassInterface()->getSuperClass()->isWeakImported())
SuperClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
}
GetClassSizeInfo(ID, InstanceStart, InstanceSize);
@@ -5076,6 +4981,8 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
// Force the definition of the EHType if necessary.
if (flags & CLS_EXCEPTION)
GetInterfaceEHType(ID->getClassInterface(), true);
+ // Make sure method definition entries are all cleared for the next implementation.
+ MethodDefinitions.clear();
}
/// GenerateProtocolRef - This routine is called to generate code for
@@ -5136,7 +5043,7 @@ void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
Values[0] = GetClassName(OCD->getIdentifier());
// meta-class entry symbol
llvm::GlobalVariable *ClassGV = GetClassGlobal(ExtClassName);
- if (Interface->hasAttr<WeakImportAttr>())
+ if (Interface->isWeakImported())
ClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
Values[1] = ClassGV;
@@ -5204,6 +5111,8 @@ void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
// Determine if this category is also "non-lazy".
if (ImplementationIsNonLazy(OCD))
DefinedNonLazyCategories.push_back(GCATV);
+ // Method definition entries must be cleared for the next implementation.
+ MethodDefinitions.clear();
}
/// GetMethodConstant - Return a struct objc_method constant for the
@@ -5622,7 +5531,8 @@ CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend(
llvm::Value *Receiver,
QualType Arg0Ty,
bool IsSuper,
- const CallArgList &CallArgs) {
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
// FIXME: Even though IsSuper is passed, this function does not handle calls
// to 'super' receivers.
CodeGenTypes &Types = CGM.getTypes();
@@ -5685,15 +5595,15 @@ CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend(
llvm::Value *Arg1 = CGF.Builder.CreateBitCast(GV, ObjCTypes.MessageRefPtrTy);
CallArgList ActualArgs;
- ActualArgs.push_back(std::make_pair(RValue::get(Arg0), Arg0Ty));
- ActualArgs.push_back(std::make_pair(RValue::get(Arg1),
- ObjCTypes.MessageRefCPtrTy));
+ ActualArgs.add(RValue::get(Arg0), Arg0Ty);
+ ActualArgs.add(RValue::get(Arg1), ObjCTypes.MessageRefCPtrTy);
ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
const CGFunctionInfo &FnInfo1 = Types.getFunctionInfo(ResultType, ActualArgs,
FunctionType::ExtInfo());
llvm::Value *Callee = CGF.Builder.CreateStructGEP(Arg1, 0);
Callee = CGF.Builder.CreateLoad(Callee);
- const llvm::FunctionType *FTy = Types.GetFunctionType(FnInfo1, true);
+ const llvm::FunctionType *FTy =
+ Types.GetFunctionType(FnInfo1, Method ? Method->isVariadic() : false);
Callee = CGF.Builder.CreateBitCast(Callee,
llvm::PointerType::getUnqual(FTy));
return CGF.EmitCall(FnInfo1, Callee, Return, ActualArgs);
@@ -5716,7 +5626,7 @@ CGObjCNonFragileABIMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
false, CallArgs, Method, ObjCTypes)
: EmitMessageSend(CGF, Return, ResultType, Sel,
Receiver, CGF.getContext().getObjCIdType(),
- false, CallArgs);
+ false, CallArgs, Method);
}
llvm::GlobalVariable *
@@ -5807,7 +5717,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CGBuilderTy &Builder,
/// decl.
llvm::Value *CGObjCNonFragileABIMac::GetClass(CGBuilderTy &Builder,
const ObjCInterfaceDecl *ID) {
- if (ID->hasAttr<WeakImportAttr>()) {
+ if (ID->isWeakImported()) {
std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
ClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
@@ -5834,7 +5744,7 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
// Create and init a super structure; this is a (receiver, class)
// pair we will pass to objc_msgSendSuper.
llvm::Value *ObjCSuper =
- CGF.Builder.CreateAlloca(ObjCTypes.SuperTy, 0, "objc_super");
+ CGF.CreateTempAlloca(ObjCTypes.SuperTy, "objc_super");
llvm::Value *ReceiverAsObject =
CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
@@ -5870,7 +5780,7 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
true, CallArgs, Method, ObjCTypes)
: EmitMessageSend(CGF, Return, ResultType, Sel,
ObjCSuper, ObjCTypes.SuperPtrCTy,
- true, CallArgs);
+ true, CallArgs, Method);
}
llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CGBuilderTy &Builder,
@@ -6008,65 +5918,12 @@ void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
return;
}
-namespace {
- struct CallSyncExit : EHScopeStack::Cleanup {
- llvm::Value *SyncExitFn;
- llvm::Value *SyncArg;
- CallSyncExit(llvm::Value *SyncExitFn, llvm::Value *SyncArg)
- : SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}
-
- void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
- CGF.Builder.CreateCall(SyncExitFn, SyncArg)->setDoesNotThrow();
- }
- };
-}
-
void
CGObjCNonFragileABIMac::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtSynchronizedStmt &S) {
- // Evaluate the lock operand. This should dominate the cleanup.
- llvm::Value *SyncArg = CGF.EmitScalarExpr(S.getSynchExpr());
-
- // Acquire the lock.
- SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
- CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg)
- ->setDoesNotThrow();
-
- // Register an all-paths cleanup to release the lock.
- CGF.EHStack.pushCleanup<CallSyncExit>(NormalAndEHCleanup,
- ObjCTypes.getSyncExitFn(),
- SyncArg);
-
- // Emit the body of the statement.
- CGF.EmitStmt(S.getSynchBody());
-
- // Pop the lock-release cleanup.
- CGF.PopCleanupBlock();
-}
-
-namespace {
- struct CatchHandler {
- const VarDecl *Variable;
- const Stmt *Body;
- llvm::BasicBlock *Block;
- llvm::Value *TypeInfo;
- };
-
- struct CallObjCEndCatch : EHScopeStack::Cleanup {
- CallObjCEndCatch(bool MightThrow, llvm::Value *Fn) :
- MightThrow(MightThrow), Fn(Fn) {}
- bool MightThrow;
- llvm::Value *Fn;
-
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
- if (!MightThrow) {
- CGF.Builder.CreateCall(Fn)->setDoesNotThrow();
- return;
- }
-
- CGF.EmitCallOrInvoke(Fn, 0, 0);
- }
- };
+ EmitAtSynchronizedStmt(CGF, S,
+ cast<llvm::Function>(ObjCTypes.getSyncEnterFn()),
+ cast<llvm::Function>(ObjCTypes.getSyncExitFn()));
}
llvm::Constant *
@@ -6096,104 +5953,10 @@ CGObjCNonFragileABIMac::GetEHType(QualType T) {
void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtTryStmt &S) {
- // Jump destination for falling out of catch bodies.
- CodeGenFunction::JumpDest Cont;
- if (S.getNumCatchStmts())
- Cont = CGF.getJumpDestInCurrentScope("eh.cont");
-
- CodeGenFunction::FinallyInfo FinallyInfo;
- if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
- FinallyInfo = CGF.EnterFinallyBlock(Finally->getFinallyBody(),
- ObjCTypes.getObjCBeginCatchFn(),
- ObjCTypes.getObjCEndCatchFn(),
- ObjCTypes.getExceptionRethrowFn());
-
- llvm::SmallVector<CatchHandler, 8> Handlers;
-
- // Enter the catch, if there is one.
- if (S.getNumCatchStmts()) {
- for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
- const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
- const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
-
- Handlers.push_back(CatchHandler());
- CatchHandler &Handler = Handlers.back();
- Handler.Variable = CatchDecl;
- Handler.Body = CatchStmt->getCatchBody();
- Handler.Block = CGF.createBasicBlock("catch");
-
- // @catch(...) always matches.
- if (!CatchDecl) {
- Handler.TypeInfo = 0; // catch-all
- // Don't consider any other catches.
- break;
- }
-
- Handler.TypeInfo = GetEHType(CatchDecl->getType());
- }
-
- EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
- for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
- Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
- }
-
- // Emit the try body.
- CGF.EmitStmt(S.getTryBody());
-
- // Leave the try.
- if (S.getNumCatchStmts())
- CGF.EHStack.popCatch();
-
- // Remember where we were.
- CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
-
- // Emit the handlers.
- for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
- CatchHandler &Handler = Handlers[I];
-
- CGF.EmitBlock(Handler.Block);
- llvm::Value *RawExn = CGF.Builder.CreateLoad(CGF.getExceptionSlot());
-
- // Enter the catch.
- llvm::CallInst *Exn =
- CGF.Builder.CreateCall(ObjCTypes.getObjCBeginCatchFn(), RawExn,
- "exn.adjusted");
- Exn->setDoesNotThrow();
-
- // Add a cleanup to leave the catch.
- bool EndCatchMightThrow = (Handler.Variable == 0);
- CGF.EHStack.pushCleanup<CallObjCEndCatch>(NormalAndEHCleanup,
- EndCatchMightThrow,
- ObjCTypes.getObjCEndCatchFn());
-
- // Bind the catch parameter if it exists.
- if (const VarDecl *CatchParam = Handler.Variable) {
- const llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
- llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);
-
- CGF.EmitAutoVarDecl(*CatchParam);
- CGF.Builder.CreateStore(CastExn, CGF.GetAddrOfLocalVar(CatchParam));
- }
-
- CGF.ObjCEHValueStack.push_back(Exn);
- CGF.EmitStmt(Handler.Body);
- CGF.ObjCEHValueStack.pop_back();
-
- // Leave the earlier cleanup.
- CGF.PopCleanupBlock();
-
- CGF.EmitBranchThroughCleanup(Cont);
- }
-
- // Go back to the try-statement fallthrough.
- CGF.Builder.restoreIP(SavedIP);
-
- // Pop out of the normal cleanup on the finally.
- if (S.getFinallyStmt())
- CGF.ExitFinallyBlock(FinallyInfo);
-
- if (Cont.isValid())
- CGF.EmitBlock(Cont.getBlock());
+ EmitTryCatchStmt(CGF, S,
+ cast<llvm::Function>(ObjCTypes.getObjCBeginCatchFn()),
+ cast<llvm::Function>(ObjCTypes.getObjCEndCatchFn()),
+ cast<llvm::Function>(ObjCTypes.getExceptionRethrowFn()));
}
/// EmitThrowStmt - Generate code for a throw statement.
@@ -6290,10 +6053,7 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
CodeGen::CGObjCRuntime *
CodeGen::CreateMacObjCRuntime(CodeGen::CodeGenModule &CGM) {
+ if (CGM.getLangOptions().ObjCNonFragileABI)
+ return new CGObjCNonFragileABIMac(CGM);
return new CGObjCMac(CGM);
}
-
-CodeGen::CGObjCRuntime *
-CodeGen::CreateMacNonFragileABIObjCRuntime(CodeGen::CodeGenModule &CGM) {
- return new CGObjCNonFragileABIMac(CGM);
-}
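With the factory change above, callers no longer need to know which Mac runtime implementation applies; the choice now mirrors the GNU factory earlier in the patch. From the caller's side (a sketch):

  // One entry point; dispatch on the language options happens inside.
  CGObjCRuntime *Runtime = CodeGen::CreateMacObjCRuntime(CGM);
  // Returns a CGObjCNonFragileABIMac when ObjCNonFragileABI is set,
  // and a CGObjCMac otherwise.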
diff --git a/lib/CodeGen/CGObjCRuntime.cpp b/lib/CodeGen/CGObjCRuntime.cpp
new file mode 100644
index 0000000..3d854d4
--- /dev/null
+++ b/lib/CodeGen/CGObjCRuntime.cpp
@@ -0,0 +1,310 @@
+//==- CGObjCRuntime.cpp - Interface to Shared Objective-C Runtime Features ==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This abstract class defines the interface for Objective-C runtime-specific
+// code generation. It provides some concrete helper methods for functionality
+// shared between all (or most) of the Objective-C runtimes supported by clang.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+
+#include "CGRecordLayout.h"
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "CGCleanup.h"
+
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtObjC.h"
+
+#include "llvm/Support/CallSite.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCImplementationDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ const ObjCInterfaceDecl *Container = Ivar->getContainingInterface();
+
+ // FIXME: We should eliminate the need to have ObjCImplementationDecl passed
+ // in here; it should never be necessary because that should be the lexical
+ // decl context for the ivar.
+
+ // If we now have an implementation (and the ivar is in it) then
+ // look up in the implementation layout.
+ const ASTRecordLayout *RL;
+ if (ID && ID->getClassInterface() == Container)
+ RL = &CGM.getContext().getASTObjCImplementationLayout(ID);
+ else
+ RL = &CGM.getContext().getASTObjCInterfaceLayout(Container);
+
+ // Compute field index.
+ //
+ // FIXME: The index here is closely tied to how ASTContext::getObjCLayout is
+ // implemented. This should be fixed to get the information from the layout
+ // directly.
+ unsigned Index = 0;
+ llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
+ CGM.getContext().ShallowCollectObjCIvars(Container, Ivars);
+ for (unsigned k = 0, e = Ivars.size(); k != e; ++k) {
+ if (Ivar == Ivars[k])
+ break;
+ ++Index;
+ }
+ assert(Index != Ivars.size() && "Ivar is not inside container!");
+ assert(Index < RL->getFieldCount() && "Ivar is not inside record layout!");
+
+ return RL->getFieldOffset(Index);
+}
+
+uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *Ivar) {
+ return LookupFieldBitOffset(CGM, OID, 0, Ivar) /
+ CGM.getContext().getCharWidth();
+}
+
+uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCImplementationDecl *OID,
+ const ObjCIvarDecl *Ivar) {
+ return LookupFieldBitOffset(CGM, OID->getClassInterface(), OID, Ivar) /
+ CGM.getContext().getCharWidth();
+}
+
+LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *OID,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers,
+ llvm::Value *Offset) {
+ // Compute (type*) ( (char *) BaseValue + Offset)
+ const llvm::Type *I8Ptr = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ QualType IvarTy = Ivar->getType();
+ const llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
+ llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, I8Ptr);
+ V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");
+ V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
+
+ if (!Ivar->isBitField()) {
+ LValue LV = CGF.MakeAddrLValue(V, IvarTy);
+ LV.getQuals().addCVRQualifiers(CVRQualifiers);
+ return LV;
+ }
+
+ // We need to compute an access strategy for this bit-field. We are given the
+ // offset to the first byte in the bit-field, the sub-byte offset is taken
+ // from the original layout. We reuse the normal bit-field access strategy by
+ // treating this as an access to a struct where the bit-field is in byte 0,
+ // and adjust the containing type size as appropriate.
+ //
+ // FIXME: Note that currently we make a very conservative estimate of the
+ // alignment of the bit-field, because (a) it is not clear what guarantees the
+ // runtime makes us, and (b) we don't have a way to specify that the struct is
+ // at an alignment plus offset.
+ //
+ // Note, there is a subtle invariant here: we can only call this routine on
+ // non-synthesized ivars but we may be called for synthesized ivars. However,
+ // a synthesized ivar can never be a bit-field, so this is safe.
+ const ASTRecordLayout &RL =
+ CGF.CGM.getContext().getASTObjCInterfaceLayout(OID);
+ uint64_t TypeSizeInBits = CGF.CGM.getContext().toBits(RL.getSize());
+ uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar);
+ uint64_t BitOffset = FieldBitOffset % CGF.CGM.getContext().getCharWidth();
+ uint64_t ContainingTypeAlign = CGF.CGM.getContext().Target.getCharAlign();
+ uint64_t ContainingTypeSize = TypeSizeInBits - (FieldBitOffset - BitOffset);
+ uint64_t BitFieldSize =
+ Ivar->getBitWidth()->EvaluateAsInt(CGF.getContext()).getZExtValue();
+
+ // Allocate a new CGBitFieldInfo object to describe this access.
+ //
+ // FIXME: This is incredibly wasteful, these should be uniqued or part of some
+ // layout object. However, this is blocked on other cleanups to the
+ // Objective-C code, so for now we just live with allocating a bunch of these
+ // objects.
+ CGBitFieldInfo *Info = new (CGF.CGM.getContext()) CGBitFieldInfo(
+ CGBitFieldInfo::MakeInfo(CGF.CGM.getTypes(), Ivar, BitOffset, BitFieldSize,
+ ContainingTypeSize, ContainingTypeAlign));
+
+ return LValue::MakeBitfield(V, *Info,
+ IvarTy.getCVRQualifiers() | CVRQualifiers);
+}
+
+namespace {
+ struct CatchHandler {
+ const VarDecl *Variable;
+ const Stmt *Body;
+ llvm::BasicBlock *Block;
+ llvm::Value *TypeInfo;
+ };
+
+ struct CallObjCEndCatch : EHScopeStack::Cleanup {
+ CallObjCEndCatch(bool MightThrow, llvm::Value *Fn) :
+ MightThrow(MightThrow), Fn(Fn) {}
+ bool MightThrow;
+ llvm::Value *Fn;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ if (!MightThrow) {
+ CGF.Builder.CreateCall(Fn)->setDoesNotThrow();
+ return;
+ }
+
+ CGF.EmitCallOrInvoke(Fn, 0, 0);
+ }
+ };
+}
+
+
+void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S,
+ llvm::Function *beginCatchFn,
+ llvm::Function *endCatchFn,
+ llvm::Function *exceptionRethrowFn) {
+ // Jump destination for falling out of catch bodies.
+ CodeGenFunction::JumpDest Cont;
+ if (S.getNumCatchStmts())
+ Cont = CGF.getJumpDestInCurrentScope("eh.cont");
+
+ CodeGenFunction::FinallyInfo FinallyInfo;
+ if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
+ FinallyInfo = CGF.EnterFinallyBlock(Finally->getFinallyBody(),
+ beginCatchFn,
+ endCatchFn,
+ exceptionRethrowFn);
+
+ llvm::SmallVector<CatchHandler, 8> Handlers;
+
+ // Enter the catch, if there is one.
+ if (S.getNumCatchStmts()) {
+ for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
+ const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
+
+ Handlers.push_back(CatchHandler());
+ CatchHandler &Handler = Handlers.back();
+ Handler.Variable = CatchDecl;
+ Handler.Body = CatchStmt->getCatchBody();
+ Handler.Block = CGF.createBasicBlock("catch");
+
+ // @catch(...) always matches.
+ if (!CatchDecl) {
+ Handler.TypeInfo = 0; // catch-all
+ // Don't consider any other catches.
+ break;
+ }
+
+ Handler.TypeInfo = GetEHType(CatchDecl->getType());
+ }
+
+ EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
+ Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
+ }
+
+ // Emit the try body.
+ CGF.EmitStmt(S.getTryBody());
+
+ // Leave the try.
+ if (S.getNumCatchStmts())
+ CGF.EHStack.popCatch();
+
+ // Remember where we were.
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
+
+ // Emit the handlers.
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
+ CatchHandler &Handler = Handlers[I];
+
+ CGF.EmitBlock(Handler.Block);
+ llvm::Value *RawExn = CGF.Builder.CreateLoad(CGF.getExceptionSlot());
+
+ // Enter the catch.
+ llvm::Value *Exn = RawExn;
+ if (beginCatchFn) {
+ Exn = CGF.Builder.CreateCall(beginCatchFn, RawExn, "exn.adjusted");
+ cast<llvm::CallInst>(Exn)->setDoesNotThrow();
+ }
+
+ if (endCatchFn) {
+ // Add a cleanup to leave the catch.
+ bool EndCatchMightThrow = (Handler.Variable == 0);
+
+ CGF.EHStack.pushCleanup<CallObjCEndCatch>(NormalAndEHCleanup,
+ EndCatchMightThrow,
+ endCatchFn);
+ }
+
+ // Bind the catch parameter if it exists.
+ if (const VarDecl *CatchParam = Handler.Variable) {
+ const llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
+ llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);
+
+ CGF.EmitAutoVarDecl(*CatchParam);
+ CGF.Builder.CreateStore(CastExn, CGF.GetAddrOfLocalVar(CatchParam));
+ }
+
+ CGF.ObjCEHValueStack.push_back(Exn);
+ CGF.EmitStmt(Handler.Body);
+ CGF.ObjCEHValueStack.pop_back();
+
+ // Leave the earlier cleanup.
+ if (endCatchFn)
+ CGF.PopCleanupBlock();
+
+ CGF.EmitBranchThroughCleanup(Cont);
+ }
+
+ // Go back to the try-statement fallthrough.
+ CGF.Builder.restoreIP(SavedIP);
+
+ // Pop out of the normal cleanup on the finally.
+ if (S.getFinallyStmt())
+ CGF.ExitFinallyBlock(FinallyInfo);
+
+ if (Cont.isValid())
+ CGF.EmitBlock(Cont.getBlock());
+}
+
+namespace {
+ struct CallSyncExit : EHScopeStack::Cleanup {
+ llvm::Value *SyncExitFn;
+ llvm::Value *SyncArg;
+ CallSyncExit(llvm::Value *SyncExitFn, llvm::Value *SyncArg)
+ : SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
+ CGF.Builder.CreateCall(SyncExitFn, SyncArg)->setDoesNotThrow();
+ }
+ };
+}
+
+void CGObjCRuntime::EmitAtSynchronizedStmt(CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S,
+ llvm::Function *syncEnterFn,
+ llvm::Function *syncExitFn) {
+ // Evaluate the lock operand. This should dominate the cleanup.
+ llvm::Value *SyncArg =
+ CGF.EmitScalarExpr(S.getSynchExpr());
+
+ // Acquire the lock.
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg,
+ syncEnterFn->getFunctionType()->getParamType(0));
+ CGF.Builder.CreateCall(syncEnterFn, SyncArg);
+
+ // Register an all-paths cleanup to release the lock.
+ CGF.EHStack.pushCleanup<CallSyncExit>(NormalAndEHCleanup, syncExitFn,
+ SyncArg);
+
+ // Emit the body of the statement.
+ CGF.EmitStmt(S.getSynchBody());
+
+ // Pop the lock-release cleanup.
+ CGF.PopCleanupBlock();
+}
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index 5ad3a50..0cc2d82 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -17,7 +17,6 @@
#define CLANG_CODEGEN_OBCJRUNTIME_H
#include "clang/Basic/IdentifierTable.h" // Selector
#include "clang/AST/DeclObjC.h"
-#include <string>
#include "CGBuilder.h"
#include "CGCall.h"
@@ -87,6 +86,26 @@ protected:
const ObjCIvarDecl *Ivar,
unsigned CVRQualifiers,
llvm::Value *Offset);
+ /// Emits a @try / @catch statement. This function is intended to be called by
+ /// subclasses, and provides a generic mechanism for generating these, which
+ /// should be usable by all runtimes. The caller must provide the functions to
+ /// call when entering and exiting a @catch() block, and the function used to
+ /// rethrow exceptions. If the begin and end catch functions are NULL, then
+ /// the function assumes that the EH personality function provides the
+ /// thrown object directly.
+ void EmitTryCatchStmt(CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S,
+ llvm::Function *beginCatchFn,
+ llvm::Function *endCatchFn,
+ llvm::Function *exceptionRethrowFn);
+ /// Emits an @synchronized() statement, using the syncEnterFn and syncExitFn
+ /// arguments as the functions called to lock and unlock the object. This
+ /// function can be called by subclasses that use zero-cost exception
+ /// handling.
+ void EmitAtSynchronizedStmt(CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S,
+ llvm::Function *syncEnterFn,
+ llvm::Function *syncExitFn);
public:
virtual ~CGObjCRuntime();
@@ -118,7 +137,7 @@ public:
/// accompanying metadata) and a list of protocols.
virtual void GenerateCategory(const ObjCCategoryImplDecl *OCD) = 0;
- /// Generate a class stucture for this class.
+ /// Generate a class structure for this class.
virtual void GenerateClass(const ObjCImplementationDecl *OID) = 0;
/// Generate an Objective-C message send operation.
@@ -230,7 +249,6 @@ public:
//TODO: This should include some way of selecting which runtime to target.
CGObjCRuntime *CreateGNUObjCRuntime(CodeGenModule &CGM);
CGObjCRuntime *CreateMacObjCRuntime(CodeGenModule &CGM);
-CGObjCRuntime *CreateMacNonFragileABIObjCRuntime(CodeGenModule &CGM);
}
}
#endif
diff --git a/lib/CodeGen/CGRTTI.cpp b/lib/CodeGen/CGRTTI.cpp
index 7ec0ee4..c73b199 100644
--- a/lib/CodeGen/CGRTTI.cpp
+++ b/lib/CodeGen/CGRTTI.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Type.h"
#include "clang/Frontend/CodeGenOptions.h"
+#include "CGObjCRuntime.h"
using namespace clang;
using namespace CodeGen;
@@ -195,7 +196,9 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
case BuiltinType::Overload:
case BuiltinType::Dependent:
- assert(false && "Should not see this type here!");
+ case BuiltinType::BoundMember:
+ case BuiltinType::UnknownAny:
+ llvm_unreachable("asking for RTTI for a placeholder type!");
case BuiltinType::ObjCId:
case BuiltinType::ObjCClass:
@@ -875,14 +878,16 @@ void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
// For a non-virtual base, this is the offset in the object of the base
// subobject. For a virtual base, this is the offset in the virtual table of
// the virtual base offset for the virtual base referenced (negative).
+ CharUnits Offset;
if (Base->isVirtual())
- OffsetFlags = CGM.getVTables().getVirtualBaseOffsetOffset(RD, BaseDecl);
+ Offset =
+ CGM.getVTables().getVirtualBaseOffsetOffset(RD, BaseDecl);
else {
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- OffsetFlags = Layout.getBaseClassOffsetInBits(BaseDecl) / 8;
+ Offset = Layout.getBaseClassOffset(BaseDecl);
};
- OffsetFlags <<= 8;
+ OffsetFlags = Offset.getQuantity() << 8;
// The low-order byte of __offset_flags contains flags, as given by the
// masks from the enumeration __offset_flags_masks.
@@ -977,6 +982,10 @@ llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
return llvm::Constant::getNullValue(Int8PtrTy);
}
+
+ if (ForEH && Ty->isObjCObjectPointerType() && !Features.NeXTRuntime) {
+ return Runtime->GetEHType(Ty);
+ }
return RTTIBuilder(*this).BuildTypeInfo(Ty);
}
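The BuildVMIClassTypeInfo hunk above now computes the base offset in CharUnits before packing it. Under the Itanium C++ ABI, __offset_flags keeps the offset in the high bits and the flags in the low byte, roughly (a sketch):

  // __offset_flags for a __base_class_type_info entry:
  // high bits = offset in chars (or the vtable offset, negative, if virtual);
  // low 8 bits = flags (__virtual_mask = 0x1, __public_mask = 0x2).
  long flags = 0x2;  // e.g. a public, non-virtual base
  long offset_flags = (Offset.getQuantity() << 8) | flags;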
diff --git a/lib/CodeGen/CGRecordLayout.h b/lib/CodeGen/CGRecordLayout.h
index 30da05f..6d9fc05 100644
--- a/lib/CodeGen/CGRecordLayout.h
+++ b/lib/CodeGen/CGRecordLayout.h
@@ -12,6 +12,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/DerivedTypes.h"
+#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
namespace llvm {
class raw_ostream;
@@ -53,7 +54,7 @@ public:
/// unused as the cleanest IR comes from having a well-constructed LLVM type
/// with proper GEP instructions, but sometimes its use is required, for
/// example if an access is intended to straddle an LLVM field boundary.
- unsigned FieldByteOffset;
+ CharUnits FieldByteOffset;
/// Bit offset in the accessed value to use. The width is implied by \see
/// TargetBitWidth.
@@ -68,7 +69,7 @@ public:
// FIXME: Remove use of 0 to encode default, instead have IRgen do the right
// thing when it generates the code, if avoiding align directives is
// desired.
- unsigned AccessAlignment;
+ CharUnits AccessAlignment;
/// Offset for the target value.
unsigned TargetBitOffset;
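
FieldByteOffset and AccessAlignment now carry CharUnits rather than raw byte
counts. A toy model of the arithmetic the patch leans on (the real class lives in
clang/AST/CharUnits.h; this stand-in assumes an 8-bit-char target):

    #include <cassert>
    #include <cstdint>

    // Toy stand-in for clang::CharUnits: an opaque quantity of target "chars",
    // so byte offsets stop being bare integers that invite /8 and *8 slips.
    class CharUnits {
      int64_t Quantity;
      explicit CharUnits(int64_t Q) : Quantity(Q) {}
    public:
      CharUnits() : Quantity(0) {}
      static CharUnits fromQuantity(int64_t Q) { return CharUnits(Q); }
      static CharUnits One() { return CharUnits(1); }
      int64_t getQuantity() const { return Quantity; }
      bool isZero() const { return Quantity == 0; }
      CharUnits operator+(CharUnits R) const { return CharUnits(Quantity + R.Quantity); }
    };

    // Rough equivalent of ASTContext::toCharUnitsFromBits for 8-bit chars.
    CharUnits toCharUnitsFromBits(int64_t Bits) {
      return CharUnits::fromQuantity(Bits / 8);
    }

    int main() {
      CharUnits Off = toCharUnitsFromBits(24);  // bit offset 24 -> byte 3
      assert(Off.getQuantity() == 3 && !Off.isZero());
      assert((Off + CharUnits::One()).getQuantity() == 4);
    }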
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index ceae66f..a4ac390 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -34,7 +34,7 @@ class CGRecordLayoutBuilder {
public:
/// FieldTypes - Holds the LLVM types that the struct is created from.
///
- std::vector<const llvm::Type *> FieldTypes;
+ llvm::SmallVector<const llvm::Type *, 16> FieldTypes;
/// BaseSubobjectType - Holds the LLVM type for the non-virtual part
/// of the struct. For example, consider:
@@ -77,13 +77,26 @@ public:
/// Packed - Whether the resulting LLVM struct will be packed or not.
bool Packed;
+
+ /// IsMsStruct - Whether ms_struct is in effect or not
+ bool IsMsStruct;
private:
CodeGenTypes &Types;
+ /// LastLaidOutBaseInfo - Contains the offset and non-virtual size of the
+ /// last base laid out. Used so that we can replace the last laid out base
+ /// type with an i8 array if needed.
+ struct LastLaidOutBaseInfo {
+ CharUnits Offset;
+ CharUnits NonVirtualSize;
+
+ bool isValid() const { return !NonVirtualSize.isZero(); }
+ void invalidate() { NonVirtualSize = CharUnits::Zero(); }
+
+ } LastLaidOutBase;
+
/// Alignment - Contains the alignment of the RecordDecl.
- //
- // FIXME: This is not needed and should be removed.
CharUnits Alignment;
/// BitsAvailableInLastField - If a bit field spans only part of a LLVM field,
@@ -143,6 +156,12 @@ private:
/// struct size is a multiple of the field alignment.
void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);
+ /// ResizeLastBaseFieldIfNecessary - Fields and bases can be laid out in the
+ /// tail padding of a previous base. If this happens, the type of the previous
+ /// base needs to be changed to an array of i8. Returns true if the last
+ /// laid out base was resized.
+ bool ResizeLastBaseFieldIfNecessary(CharUnits offset);
+
/// getByteArrayType - Returns a byte array type with the given number of
/// elements.
const llvm::Type *getByteArrayType(CharUnits NumBytes);
@@ -152,7 +171,7 @@ private:
/// AppendTailPadding - Append enough tail padding so that the type will have
/// the passed size.
- void AppendTailPadding(uint64_t RecordSize);
+ void AppendTailPadding(CharUnits RecordSize);
CharUnits getTypeAlignment(const llvm::Type *Ty) const;
@@ -168,7 +187,8 @@ public:
CGRecordLayoutBuilder(CodeGenTypes &Types)
: BaseSubobjectType(0),
IsZeroInitializable(true), IsZeroInitializableAsBase(true),
- Packed(false), Types(Types), BitsAvailableInLastField(0) { }
+ Packed(false), IsMsStruct(false),
+ Types(Types), BitsAvailableInLastField(0) { }
/// Layout - Will layout a RecordDecl.
void Layout(const RecordDecl *D);
@@ -179,6 +199,8 @@ public:
void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
Packed = D->hasAttr<PackedAttr>();
+
+ IsMsStruct = D->hasAttr<MsStructAttr>();
if (D->isUnion()) {
LayoutUnion(D);
@@ -190,6 +212,7 @@ void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
// We weren't able to layout the struct. Try again with a packed struct
Packed = true;
+ LastLaidOutBase.invalidate();
NextFieldOffset = CharUnits::Zero();
FieldTypes.clear();
Fields.clear();
@@ -242,7 +265,7 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
CGBitFieldInfo::AccessInfo Components[3];
unsigned NumComponents = 0;
- unsigned AccessedTargetBits = 0; // The tumber of target bits accessed.
+ unsigned AccessedTargetBits = 0; // The number of target bits accessed.
unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.
// Round down from the field offset to find the first access position that is
@@ -292,13 +315,15 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
// in higher bits. But this also reverts the bytes, so fix this here by reverting
// the byte offset on big-endian machines.
if (Types.getTargetData().isBigEndian()) {
- AI.FieldByteOffset = (ContainingTypeSizeInBits - AccessStart - AccessWidth )/8;
+ AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(
+ ContainingTypeSizeInBits - AccessStart - AccessWidth);
} else {
- AI.FieldByteOffset = AccessStart / 8;
+ AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(AccessStart);
}
AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
AI.AccessWidth = AccessWidth;
- AI.AccessAlignment = llvm::MinAlign(ContainingTypeAlign, AccessStart) / 8;
+ AI.AccessAlignment = Types.getContext().toCharUnitsFromBits(
+ llvm::MinAlign(ContainingTypeAlign, AccessStart));
AI.TargetBitOffset = AccessedTargetBits;
AI.TargetBitWidth = AccessBitsInFieldSize;
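
The big-endian branch mirrors the access window within the containing type
before converting bits to chars, since byte order reverses which byte the window
starts in. A worked check of that mirroring (standalone; the numbers are
illustrative):

    #include <cassert>

    int main() {
      unsigned ContainingTypeSizeInBits = 32;
      unsigned AccessStart = 0;   // first bit of the access window
      unsigned AccessWidth = 16;  // width of the load/store in bits

      unsigned leByteOffset = AccessStart / 8;  // little-endian: byte 0
      unsigned beByteOffset =
          (ContainingTypeSizeInBits - AccessStart - AccessWidth) / 8;  // byte 2

      // Same 16-bit window, different starting byte once bytes are mirrored.
      assert(leByteOffset == 0 && beByteOffset == 2);
    }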
@@ -332,34 +357,51 @@ void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
return;
uint64_t nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
- unsigned numBytesToAppend;
+ CharUnits numBytesToAppend;
+ unsigned charAlign = Types.getContext().Target.getCharAlign();
+
+ if (fieldOffset < nextFieldOffsetInBits && !BitsAvailableInLastField) {
+ assert(fieldOffset % charAlign == 0 &&
+ "Field offset not aligned correctly");
+
+ CharUnits fieldOffsetInCharUnits =
+ Types.getContext().toCharUnitsFromBits(fieldOffset);
+
+ // Try to resize the last base field.
+ if (ResizeLastBaseFieldIfNecessary(fieldOffsetInCharUnits))
+ nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
+ }
if (fieldOffset < nextFieldOffsetInBits) {
assert(BitsAvailableInLastField && "Bitfield size mismatch!");
assert(!NextFieldOffset.isZero() && "Must have laid out at least one byte");
// The bitfield begins in the previous bit-field.
- numBytesToAppend =
- llvm::RoundUpToAlignment(fieldSize - BitsAvailableInLastField, 8) / 8;
+ numBytesToAppend = Types.getContext().toCharUnitsFromBits(
+ llvm::RoundUpToAlignment(fieldSize - BitsAvailableInLastField,
+ charAlign));
} else {
- assert(fieldOffset % 8 == 0 && "Field offset not aligned correctly");
+ assert(fieldOffset % charAlign == 0 &&
+ "Field offset not aligned correctly");
// Append padding if necessary.
- AppendPadding(CharUnits::fromQuantity(fieldOffset / 8), CharUnits::One());
+ AppendPadding(Types.getContext().toCharUnitsFromBits(fieldOffset),
+ CharUnits::One());
- numBytesToAppend = llvm::RoundUpToAlignment(fieldSize, 8) / 8;
+ numBytesToAppend = Types.getContext().toCharUnitsFromBits(
+ llvm::RoundUpToAlignment(fieldSize, charAlign));
- assert(numBytesToAppend && "No bytes to append!");
+ assert(!numBytesToAppend.isZero() && "No bytes to append!");
}
// Add the bit field info.
BitFields.insert(std::make_pair(D,
CGBitFieldInfo::MakeInfo(Types, D, fieldOffset, fieldSize)));
- AppendBytes(CharUnits::fromQuantity(numBytesToAppend));
+ AppendBytes(numBytesToAppend);
BitsAvailableInLastField =
- NextFieldOffset.getQuantity() * 8 - (fieldOffset + fieldSize);
+ Types.getContext().toBits(NextFieldOffset) - (fieldOffset + fieldSize);
}
bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
@@ -411,6 +453,14 @@ bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
NextFieldOffset.RoundUpToAlignment(typeAlignment);
if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
+ // Try to resize the last base field.
+ if (ResizeLastBaseFieldIfNecessary(fieldOffsetInBytes)) {
+ alignedNextFieldOffsetInBytes =
+ NextFieldOffset.RoundUpToAlignment(typeAlignment);
+ }
+ }
+
+ if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
assert(!Packed && "Could not place field even with packed struct!");
return false;
}
@@ -421,6 +471,7 @@ bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
Fields[D] = FieldTypes.size();
AppendField(fieldOffsetInBytes, Ty);
+ LastLaidOutBase.invalidate();
return true;
}
@@ -436,11 +487,12 @@ CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
return 0;
const llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
- unsigned NumBytesToAppend =
- llvm::RoundUpToAlignment(FieldSize, 8) / 8;
+ CharUnits NumBytesToAppend = Types.getContext().toCharUnitsFromBits(
+ llvm::RoundUpToAlignment(FieldSize,
+ Types.getContext().Target.getCharAlign()));
- if (NumBytesToAppend > 1)
- FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend);
+ if (NumBytesToAppend > CharUnits::One())
+ FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());
// Add the bit field info.
BitFields.insert(std::make_pair(Field,
@@ -516,11 +568,16 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
void CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
const CGRecordLayout &baseLayout,
CharUnits baseOffset) {
+ ResizeLastBaseFieldIfNecessary(baseOffset);
+
AppendPadding(baseOffset, CharUnits::One());
const ASTRecordLayout &baseASTLayout
= Types.getContext().getASTRecordLayout(base);
+ LastLaidOutBase.Offset = NextFieldOffset;
+ LastLaidOutBase.NonVirtualSize = baseASTLayout.getNonVirtualSize();
+
// Fields and bases can be laid out in the tail padding of previous
// bases. If this happens, we need to allocate the base as an i8
// array; otherwise, we can use the subobject type. However,
@@ -529,20 +586,10 @@ void CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
// approximation, which is to use the base subobject type if it
// has the same LLVM storage size as the nvsize.
- // The nvsize, i.e. the unpadded size of the base class.
- CharUnits nvsize = baseASTLayout.getNonVirtualSize();
-
-#if 0
const llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
- const llvm::StructLayout *baseLLVMLayout =
- Types.getTargetData().getStructLayout(subobjectType);
- CharUnits stsize = CharUnits::fromQuantity(baseLLVMLayout->getSizeInBytes());
+ AppendField(baseOffset, subobjectType);
- if (nvsize == stsize)
- AppendField(baseOffset, subobjectType);
- else
-#endif
- AppendBytes(nvsize);
+ Types.addBaseSubobjectTypeName(base, baseLayout);
}
void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
@@ -698,9 +745,22 @@ bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
LayoutNonVirtualBases(RD, Layout);
unsigned FieldNo = 0;
-
+ const FieldDecl *LastFD = 0;
+
for (RecordDecl::field_iterator Field = D->field_begin(),
FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are
+ // ignored:
+ const FieldDecl *FD = (*Field);
+ if (Types.getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD) ||
+ Types.getContext().ZeroBitfieldFollowsBitfield(FD, LastFD)) {
+ --FieldNo;
+ continue;
+ }
+ LastFD = FD;
+ }
+
if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
assert(!Packed &&
"Could not layout fields even with a packed LLVM struct!");
@@ -724,27 +784,25 @@ bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
}
// Append tail padding if necessary.
- AppendTailPadding(Types.getContext().toBits(Layout.getSize()));
+ AppendTailPadding(Layout.getSize());
return true;
}
-void CGRecordLayoutBuilder::AppendTailPadding(uint64_t RecordSize) {
- assert(RecordSize % 8 == 0 && "Invalid record size!");
+void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
+ ResizeLastBaseFieldIfNecessary(RecordSize);
- CharUnits RecordSizeInBytes =
- Types.getContext().toCharUnitsFromBits(RecordSize);
- assert(NextFieldOffset <= RecordSizeInBytes && "Size mismatch!");
+ assert(NextFieldOffset <= RecordSize && "Size mismatch!");
CharUnits AlignedNextFieldOffset =
NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());
- if (AlignedNextFieldOffset == RecordSizeInBytes) {
+ if (AlignedNextFieldOffset == RecordSize) {
// We don't need any padding.
return;
}
- CharUnits NumPadBytes = RecordSizeInBytes - NextFieldOffset;
+ CharUnits NumPadBytes = RecordSize - NextFieldOffset;
AppendBytes(NumPadBytes);
}
@@ -777,6 +835,24 @@ void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
}
}
+bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
+ // Check if we have a base to resize.
+ if (!LastLaidOutBase.isValid())
+ return false;
+
+ // This offset does not overlap with the tail padding.
+ if (offset >= NextFieldOffset)
+ return false;
+
+ // Restore the field offset and append an i8 array instead.
+ FieldTypes.pop_back();
+ NextFieldOffset = LastLaidOutBase.Offset;
+ AppendBytes(LastLaidOutBase.NonVirtualSize);
+ LastLaidOutBase.invalidate();
+
+ return true;
+}
+
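
ResizeLastBaseFieldIfNecessary exists because the Itanium ABI lets fields and
later bases start inside a previous base's tail padding, at which point the
base's full LLVM struct type no longer fits and must be replaced by an i8 array
of its non-virtual size. A small C++ case where this occurs (a sketch assuming a
typical Itanium-ABI target with a 4-byte, 4-aligned int, compiled as C++11):

    // B is non-POD, so the ABI permits reuse of its tail padding:
    // dsize(B) == 5 even though sizeof(B) == 8.
    struct B { int i; char c; B() {} };
    struct D : B { char d; };  // D::d lands at offset 5, inside B's padding

    int main() {
      static_assert(sizeof(B) == 8, "B padded to its alignment of 4");
      static_assert(sizeof(D) == 8, "D::d reused B's tail padding");
    }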
const llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");
@@ -903,6 +979,8 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
RecordDecl::field_iterator it = D->field_begin();
+ const FieldDecl *LastFD = 0;
+ bool IsMsStruct = D->hasAttr<MsStructAttr>();
for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
const FieldDecl *FD = *it;
@@ -912,13 +990,27 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
unsigned FieldNo = RL->getLLVMFieldNo(FD);
assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
"Invalid field offset!");
+ LastFD = FD;
continue;
}
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are
+ // ignored:
+ if (getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD) ||
+ getContext().ZeroBitfieldFollowsBitfield(FD, LastFD)) {
+ --i;
+ continue;
+ }
+ LastFD = FD;
+ }
+
// Ignore unnamed bit-fields.
- if (!FD->getDeclName())
+ if (!FD->getDeclName()) {
+ LastFD = FD;
continue;
-
+ }
+
const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
@@ -926,7 +1018,7 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
// Verify that every component access is within the structure.
uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
uint64_t AccessBitOffset = FieldOffset +
- getContext().toBits(CharUnits::fromQuantity(AI.FieldByteOffset));
+ getContext().toBits(AI.FieldByteOffset);
assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
"Invalid bit-field access (out of range)!");
}
@@ -985,11 +1077,11 @@ void CGBitFieldInfo::print(llvm::raw_ostream &OS) const {
OS.indent(8);
OS << "<AccessInfo"
<< " FieldIndex:" << AI.FieldIndex
- << " FieldByteOffset:" << AI.FieldByteOffset
+ << " FieldByteOffset:" << AI.FieldByteOffset.getQuantity()
<< " FieldBitStart:" << AI.FieldBitStart
<< " AccessWidth:" << AI.AccessWidth << "\n";
OS.indent(8 + strlen("<AccessInfo"));
- OS << " AccessAlignment:" << AI.AccessAlignment
+ OS << " AccessAlignment:" << AI.AccessAlignment.getQuantity()
<< " TargetBitOffset:" << AI.TargetBitOffset
<< " TargetBitWidth:" << AI.TargetBitWidth
<< ">\n";
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index cd23811..99bc3f4 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -72,6 +72,8 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
switch (S->getStmtClass()) {
case Stmt::NoStmtClass:
case Stmt::CXXCatchStmtClass:
+ case Stmt::SEHExceptStmtClass:
+ case Stmt::SEHFinallyStmtClass:
llvm_unreachable("invalid statement class to emit generically");
case Stmt::NullStmtClass:
case Stmt::CompoundStmtClass:
@@ -153,6 +155,11 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
case Stmt::CXXTryStmtClass:
EmitCXXTryStmt(cast<CXXTryStmt>(*S));
break;
+  case Stmt::CXXForRangeStmtClass:
+    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S));
+    break;
+  case Stmt::SEHTryStmtClass:
+    // FIXME: Not yet implemented.
+    break;
}
}
@@ -359,10 +366,12 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
// If the condition constant folds and can be elided, try to avoid emitting
// the condition and the dead arm of the if/else.
- if (int Cond = ConstantFoldsToSimpleInteger(S.getCond())) {
+ bool CondConstant;
+ if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant)) {
// Figure out which block (then or else) is executed.
- const Stmt *Executed = S.getThen(), *Skipped = S.getElse();
- if (Cond == -1) // Condition false?
+ const Stmt *Executed = S.getThen();
+ const Stmt *Skipped = S.getElse();
+ if (!CondConstant) // Condition false?
std::swap(Executed, Skipped);
// If the skipped block has no labels in it, just emit the executed block.
@@ -395,11 +404,17 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
// Emit the 'else' code if present.
if (const Stmt *Else = S.getElse()) {
+    // There is no need to emit a line number for an unconditional branch.
+ if (getDebugInfo())
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc());
EmitBlock(ElseBlock);
{
RunCleanupsScope ElseScope(*this);
EmitStmt(Else);
}
+    // There is no need to emit a line number for an unconditional branch.
+ if (getDebugInfo())
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc());
EmitBranch(ContBlock);
}
@@ -628,6 +643,80 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
EmitBlock(LoopExit.getBlock(), true);
}
+void CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S) {
+ JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
+
+ RunCleanupsScope ForScope(*this);
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI) {
+ DI->setLocation(S.getSourceRange().getBegin());
+ DI->EmitRegionStart(Builder);
+ }
+
+ // Evaluate the first pieces before the loop.
+ EmitStmt(S.getRangeStmt());
+ EmitStmt(S.getBeginEndStmt());
+
+ // Start the loop with a block that tests the condition.
+ // If there's an increment, the continue scope will be overwritten
+ // later.
+ llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+ EmitBlock(CondBlock);
+
+ // If there are any cleanups between here and the loop-exit scope,
+ // create a block to stage a loop exit along.
+ llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
+ if (ForScope.requiresCleanups())
+ ExitBlock = createBasicBlock("for.cond.cleanup");
+
+ // The loop body, consisting of the specified body and the loop variable.
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+ // The body is executed if the expression, contextually converted
+ // to bool, is true.
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+ Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock);
+
+ if (ExitBlock != LoopExit.getBlock()) {
+ EmitBlock(ExitBlock);
+ EmitBranchThroughCleanup(LoopExit);
+ }
+
+ EmitBlock(ForBody);
+
+ // Create a block for the increment. In case of a 'continue', we jump there.
+ JumpDest Continue = getJumpDestInCurrentScope("for.inc");
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
+
+ {
+ // Create a separate cleanup scope for the loop variable and body.
+ RunCleanupsScope BodyScope(*this);
+ EmitStmt(S.getLoopVarStmt());
+ EmitStmt(S.getBody());
+ }
+
+ // If there is an increment, emit it next.
+ EmitBlock(Continue.getBlock());
+ EmitStmt(S.getInc());
+
+ BreakContinueStack.pop_back();
+
+ EmitBranch(CondBlock);
+
+ ForScope.ForceCleanup();
+
+ if (DI) {
+ DI->setLocation(S.getSourceRange().getEnd());
+ DI->EmitRegionEnd(Builder);
+ }
+
+ // Emit the fall-through block.
+ EmitBlock(LoopExit.getBlock(), true);
+}
+
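
EmitCXXForRangeStmt walks the pieces Sema already synthesized: the range and
begin/end declarations, the condition, the increment, and the loop variable. In
source terms the emission order corresponds roughly to this desugaring (names
like __range are the usual exposition-only spellings, not identifiers from the
patch):

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> v;
      v.push_back(1); v.push_back(2);

      // for (int x : v) { ... }  is emitted roughly as:
      {
        std::vector<int> &__range = v;                     // S.getRangeStmt()
        std::vector<int>::iterator __begin = __range.begin(),
                                   __end = __range.end();  // S.getBeginEndStmt()
        for (; __begin != __end; ++__begin) {              // cond + "for.inc" block
          int x = *__begin;                                // S.getLoopVarStmt()
          printf("%d\n", x);                               // S.getBody()
        }
      }
    }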
void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
if (RV.isScalar()) {
Builder.CreateStore(RV.getScalarVal(), ReturnValue);
@@ -745,8 +834,7 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
// Range is small enough to add multiple switch instruction cases.
for (unsigned i = 0, e = Range.getZExtValue() + 1; i != e; ++i) {
- SwitchInsn->addCase(llvm::ConstantInt::get(getLLVMContext(), LHS),
- CaseDest);
+ SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
LHS++;
}
return;
@@ -767,11 +855,9 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
// Emit range check.
llvm::Value *Diff =
- Builder.CreateSub(SwitchInsn->getCondition(),
- llvm::ConstantInt::get(getLLVMContext(), LHS), "tmp");
+ Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS), "tmp");
llvm::Value *Cond =
- Builder.CreateICmpULE(Diff, llvm::ConstantInt::get(getLLVMContext(), Range),
- "inbounds");
+ Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
Builder.CreateCondBr(Cond, CaseDest, FalseDest);
// Restore the appropriate insertion point.
@@ -782,16 +868,37 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
}
void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
+ // Handle case ranges.
if (S.getRHS()) {
EmitCaseStmtRange(S);
return;
}
+ llvm::ConstantInt *CaseVal =
+ Builder.getInt(S.getLHS()->EvaluateAsInt(getContext()));
+
+ // If the body of the case is just a 'break', and if there was no fallthrough,
+  // try to avoid emitting an empty block.
+ if (isa<BreakStmt>(S.getSubStmt())) {
+ JumpDest Block = BreakContinueStack.back().BreakBlock;
+
+ // Only do this optimization if there are no cleanups that need emitting.
+ if (isObviouslyBranchWithoutCleanups(Block)) {
+ SwitchInsn->addCase(CaseVal, Block.getBlock());
+
+ // If there was a fallthrough into this case, make sure to redirect it to
+ // the end of the switch as well.
+ if (Builder.GetInsertBlock()) {
+ Builder.CreateBr(Block.getBlock());
+ Builder.ClearInsertionPoint();
+ }
+ return;
+ }
+ }
+
EmitBlock(createBasicBlock("sw.bb"));
llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
- llvm::APSInt CaseVal = S.getLHS()->EvaluateAsInt(getContext());
- SwitchInsn->addCase(llvm::ConstantInt::get(getLLVMContext(), CaseVal),
- CaseDest);
+ SwitchInsn->addCase(CaseVal, CaseDest);
// Recursively emitting the statement is acceptable, but is not wonderful for
// code where we have many case statements nested together, i.e.:
@@ -805,13 +912,12 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
const CaseStmt *CurCase = &S;
const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
- // Otherwise, iteratively add consequtive cases to this switch stmt.
+ // Otherwise, iteratively add consecutive cases to this switch stmt.
while (NextCase && NextCase->getRHS() == 0) {
CurCase = NextCase;
- CaseVal = CurCase->getLHS()->EvaluateAsInt(getContext());
- SwitchInsn->addCase(llvm::ConstantInt::get(getLLVMContext(), CaseVal),
- CaseDest);
-
+ llvm::ConstantInt *CaseVal =
+ Builder.getInt(CurCase->getLHS()->EvaluateAsInt(getContext()));
+ SwitchInsn->addCase(CaseVal, CaseDest);
NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
}
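
The while loop above folds consecutive labels into repeated addCase calls on a
single destination block instead of recursing once per label. A sketch of the
source shape it targets:

    int classify(int c) {
      switch (c) {
      // All three labels map to the same "sw.bb" block; the loop above adds
      // each value via SwitchInsn->addCase rather than recursing per label.
      case 1:
      case 2:
      case 3:
        return 100;
      default:
        return 0;
      }
    }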
@@ -827,6 +933,207 @@ void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
EmitStmt(S.getSubStmt());
}
+/// CollectStatementsForCase - Given the body of a 'switch' statement and a
+/// constant value that is being switched on, see if we can dead code eliminate
+/// the body of the switch to a simple series of statements to emit. Basically,
+/// on a switch (5) we want to find these statements:
+/// case 5:
+/// printf(...); <--
+/// ++i; <--
+/// break;
+///
+/// and add them to the ResultStmts vector. If it is unsafe to do this
+/// transformation (for example, one of the elided statements contains a label
+/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
+/// should include statements after it (e.g. the printf() line is a substmt of
+/// the case) then return CSFC_FallThrough. If we handled it and found a break
+/// statement, then return CSFC_Success.
+///
+/// If Case is non-null, then we are looking for the specified case, checking
+/// that nothing we jump over contains labels. If Case is null, then we found
+/// the case and are looking for the break.
+///
+/// If the recursive walk actually finds our Case, then we set FoundCase to
+/// true.
+///
+enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
+static CSFC_Result CollectStatementsForCase(const Stmt *S,
+ const SwitchCase *Case,
+ bool &FoundCase,
+ llvm::SmallVectorImpl<const Stmt*> &ResultStmts) {
+ // If this is a null statement, just succeed.
+ if (S == 0)
+ return Case ? CSFC_Success : CSFC_FallThrough;
+
+ // If this is the switchcase (case 4: or default) that we're looking for, then
+ // we're in business. Just add the substatement.
+ if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
+ if (S == Case) {
+ FoundCase = true;
+ return CollectStatementsForCase(SC->getSubStmt(), 0, FoundCase,
+ ResultStmts);
+ }
+
+ // Otherwise, this is some other case or default statement, just ignore it.
+ return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
+ ResultStmts);
+ }
+
+ // If we are in the live part of the code and we found our break statement,
+ // return a success!
+ if (Case == 0 && isa<BreakStmt>(S))
+ return CSFC_Success;
+
+  // If this is a compound statement, then it might contain the SwitchCase, the
+ // break, or neither.
+ if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
+ // Handle this as two cases: we might be looking for the SwitchCase (if so
+ // the skipped statements must be skippable) or we might already have it.
+ CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
+ if (Case) {
+ // Keep track of whether we see a skipped declaration. The code could be
+ // using the declaration even if it is skipped, so we can't optimize out
+ // the decl if the kept statements might refer to it.
+ bool HadSkippedDecl = false;
+
+ // If we're looking for the case, just see if we can skip each of the
+ // substatements.
+ for (; Case && I != E; ++I) {
+ HadSkippedDecl |= isa<DeclStmt>(I);
+
+ switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
+ case CSFC_Failure: return CSFC_Failure;
+ case CSFC_Success:
+          // A successful result means either 1) that the statement doesn't
+          // have the case and is skippable, or 2) that it does contain the case
+          // value and the break that exits the switch. In the latter case, we
+          // just verify the rest of the statements are elidable.
+ if (FoundCase) {
+ // If we found the case and skipped declarations, we can't do the
+ // optimization.
+ if (HadSkippedDecl)
+ return CSFC_Failure;
+
+ for (++I; I != E; ++I)
+ if (CodeGenFunction::ContainsLabel(*I, true))
+ return CSFC_Failure;
+ return CSFC_Success;
+ }
+ break;
+ case CSFC_FallThrough:
+        // If we have a fallthrough condition, then we must have found the
+        // case and started to include statements. Consider the rest of the
+ // statements in the compound statement as candidates for inclusion.
+ assert(FoundCase && "Didn't find case but returned fallthrough?");
+ // We recursively found Case, so we're not looking for it anymore.
+ Case = 0;
+
+ // If we found the case and skipped declarations, we can't do the
+ // optimization.
+ if (HadSkippedDecl)
+ return CSFC_Failure;
+ break;
+ }
+ }
+ }
+
+ // If we have statements in our range, then we know that the statements are
+ // live and need to be added to the set of statements we're tracking.
+ for (; I != E; ++I) {
+ switch (CollectStatementsForCase(*I, 0, FoundCase, ResultStmts)) {
+ case CSFC_Failure: return CSFC_Failure;
+ case CSFC_FallThrough:
+        // A fallthrough result means that the statement was simple and was
+        // included in ResultStmts; keep adding the following statements.
+ break;
+ case CSFC_Success:
+ // A successful result means that we found the break statement and
+ // stopped statement inclusion. We just ensure that any leftover stmts
+ // are skippable and return success ourselves.
+ for (++I; I != E; ++I)
+ if (CodeGenFunction::ContainsLabel(*I, true))
+ return CSFC_Failure;
+ return CSFC_Success;
+ }
+ }
+
+ return Case ? CSFC_Success : CSFC_FallThrough;
+ }
+
+ // Okay, this is some other statement that we don't handle explicitly, like a
+ // for statement or increment etc. If we are skipping over this statement,
+ // just verify it doesn't have labels, which would make it invalid to elide.
+ if (Case) {
+ if (CodeGenFunction::ContainsLabel(S, true))
+ return CSFC_Failure;
+ return CSFC_Success;
+ }
+
+ // Otherwise, we want to include this statement. Everything is cool with that
+ // so long as it doesn't contain a break out of the switch we're in.
+ if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
+
+ // Otherwise, everything is great. Include the statement and tell the caller
+ // that we fall through and include the next statement as well.
+ ResultStmts.push_back(S);
+ return CSFC_FallThrough;
+}
+
+/// FindCaseStatementsForValue - Find the case statement being jumped to and
+/// then invoke CollectStatementsForCase to find the list of statements to emit
+/// for a switch on constant. See the comment above CollectStatementsForCase
+/// for more details.
+static bool FindCaseStatementsForValue(const SwitchStmt &S,
+ const llvm::APInt &ConstantCondValue,
+ llvm::SmallVectorImpl<const Stmt*> &ResultStmts,
+ ASTContext &C) {
+ // First step, find the switch case that is being branched to. We can do this
+ // efficiently by scanning the SwitchCase list.
+ const SwitchCase *Case = S.getSwitchCaseList();
+ const DefaultStmt *DefaultCase = 0;
+
+ for (; Case; Case = Case->getNextSwitchCase()) {
+ // It's either a default or case. Just remember the default statement in
+ // case we're not jumping to any numbered cases.
+ if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
+ DefaultCase = DS;
+ continue;
+ }
+
+ // Check to see if this case is the one we're looking for.
+ const CaseStmt *CS = cast<CaseStmt>(Case);
+ // Don't handle case ranges yet.
+ if (CS->getRHS()) return false;
+
+ // If we found our case, remember it as 'case'.
+ if (CS->getLHS()->EvaluateAsInt(C) == ConstantCondValue)
+ break;
+ }
+
+ // If we didn't find a matching case, we use a default if it exists, or we
+ // elide the whole switch body!
+ if (Case == 0) {
+ // It is safe to elide the body of the switch if it doesn't contain labels
+ // etc. If it is safe, return successfully with an empty ResultStmts list.
+ if (DefaultCase == 0)
+ return !CodeGenFunction::ContainsLabel(&S);
+ Case = DefaultCase;
+ }
+
+ // Ok, we know which case is being jumped to, try to collect all the
+ // statements that follow it. This can fail for a variety of reasons. Also,
+ // check to see that the recursive walk actually found our case statement.
+ // Insane cases like this can fail to find it in the recursive walk since we
+ // don't handle every stmt kind:
+ // switch (4) {
+ // while (1) {
+ // case 4: ...
+ bool FoundCase = false;
+ return CollectStatementsForCase(S.getBody(), Case, FoundCase,
+ ResultStmts) != CSFC_Failure &&
+ FoundCase;
+}
+
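
Together with the ConstantFoldsToSimpleInteger check in EmitSwitchStmt below,
this lets IRGen emit only the live arm of a switch on a constant and skip the
switch instruction entirely. For instance, in a sketch like the following only
the two statements of case 5 reach the IR, provided no skipped statement
contains a label:

    #include <cstdio>

    int f() {
      int n = 0;
      switch (5) {            // condition folds to the constant 5
      case 4:
        n -= 1;               // dead: never emitted
        break;
      case 5:
        printf("live arm\n"); // collected by CollectStatementsForCase
        ++n;
        break;                // CSFC_Success: stop collecting
      default:
        n = 99;               // dead: never emitted
      }
      return n;               // returns 1
    }

    int main() { return f(); }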
void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
@@ -835,6 +1142,23 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
if (S.getConditionVariable())
EmitAutoVarDecl(*S.getConditionVariable());
+ // See if we can constant fold the condition of the switch and therefore only
+ // emit the live case statement (if any) of the switch.
+ llvm::APInt ConstantCondValue;
+ if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
+ llvm::SmallVector<const Stmt*, 4> CaseStmts;
+ if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
+ getContext())) {
+ RunCleanupsScope ExecutedScope(*this);
+
+ // Okay, we can dead code eliminate everything except this case. Emit the
+ // specified series of statements and we're good.
+ for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
+ EmitStmt(CaseStmts[i]);
+ return;
+ }
+ }
+
llvm::Value *CondV = EmitScalarExpr(S.getCond());
// Handle nested switch statements.
@@ -1031,7 +1355,7 @@ static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
}
}
- return llvm::MDNode::get(CGF.getLLVMContext(), Locs.data(), Locs.size());
+ return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
}
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
index 78b2dbe..a6849f8 100644
--- a/lib/CodeGen/CGVTT.cpp
+++ b/lib/CodeGen/CGVTT.cpp
@@ -53,6 +53,10 @@ class VTTBuilder {
/// GenerateDefinition - Whether the VTT builder should generate LLVM IR for
/// the VTT.
bool GenerateDefinition;
+
+ /// The linkage to use for any construction vtables required by this VTT.
+ /// Only required if we're building a definition.
+ llvm::GlobalVariable::LinkageTypes LinkageForConstructionVTables;
/// GetAddrOfVTable - Returns the address of the vtable for the base class in
/// the given vtable class.
@@ -109,7 +113,9 @@ class VTTBuilder {
public:
VTTBuilder(CodeGenModule &CGM, const CXXRecordDecl *MostDerivedClass,
- bool GenerateDefinition);
+ bool GenerateDefinition,
+ llvm::GlobalVariable::LinkageTypes LinkageForConstructionVTables
+ = (llvm::GlobalVariable::LinkageTypes) -1);
// getVTTComponents - Returns a reference to the VTT components.
const VTTComponentsVectorTy &getVTTComponents() const {
@@ -132,13 +138,19 @@ public:
VTTBuilder::VTTBuilder(CodeGenModule &CGM,
const CXXRecordDecl *MostDerivedClass,
- bool GenerateDefinition)
+ bool GenerateDefinition,
+ llvm::GlobalVariable::LinkageTypes LinkageForConstructionVTables)
: CGM(CGM), MostDerivedClass(MostDerivedClass),
MostDerivedClassLayout(CGM.getContext().getASTRecordLayout(MostDerivedClass)),
- GenerateDefinition(GenerateDefinition) {
+ GenerateDefinition(GenerateDefinition),
+ LinkageForConstructionVTables(LinkageForConstructionVTables) {
+ assert(!GenerateDefinition ||
+ LinkageForConstructionVTables
+ != (llvm::GlobalVariable::LinkageTypes) -1);
// Lay out this VTT.
- LayoutVTT(BaseSubobject(MostDerivedClass, 0), /*BaseIsVirtual=*/false);
+ LayoutVTT(BaseSubobject(MostDerivedClass, CharUnits::Zero()),
+ /*BaseIsVirtual=*/false);
}
llvm::Constant *
@@ -148,7 +160,7 @@ VTTBuilder::GetAddrOfVTable(BaseSubobject Base, bool BaseIsVirtual,
return 0;
if (Base.getBase() == MostDerivedClass) {
- assert(Base.getBaseOffset() == 0 &&
+ assert(Base.getBaseOffset().isZero() &&
"Most derived class vtable must have a zero offset!");
// This is a regular vtable.
return CGM.getVTables().GetAddrOfVTable(MostDerivedClass);
@@ -156,6 +168,7 @@ VTTBuilder::GetAddrOfVTable(BaseSubobject Base, bool BaseIsVirtual,
return CGM.getVTables().GenerateConstructionVTable(MostDerivedClass,
Base, BaseIsVirtual,
+ LinkageForConstructionVTables,
AddressPoints);
}
@@ -217,8 +230,8 @@ void VTTBuilder::LayoutSecondaryVTTs(BaseSubobject Base) {
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- uint64_t BaseOffset = Base.getBaseOffset() +
- Layout.getBaseClassOffsetInBits(BaseDecl);
+ CharUnits BaseOffset = Base.getBaseOffset() +
+ Layout.getBaseClassOffset(BaseDecl);
// Layout the VTT for this base.
LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/false);
@@ -256,19 +269,19 @@ VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
bool BaseDeclIsMorallyVirtual = BaseIsMorallyVirtual;
bool BaseDeclIsNonVirtualPrimaryBase = false;
- uint64_t BaseOffset;
+ CharUnits BaseOffset;
if (I->isVirtual()) {
// Ignore virtual bases that we've already visited.
if (!VBases.insert(BaseDecl))
continue;
- BaseOffset = MostDerivedClassLayout.getVBaseClassOffsetInBits(BaseDecl);
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
BaseDeclIsMorallyVirtual = true;
} else {
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- BaseOffset =
- Base.getBaseOffset() + Layout.getBaseClassOffsetInBits(BaseDecl);
+ BaseOffset = Base.getBaseOffset() +
+ Layout.getBaseClassOffset(BaseDecl);
if (!Layout.isPrimaryBaseVirtual() &&
Layout.getPrimaryBase() == BaseDecl)
@@ -283,8 +296,8 @@ VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
if (!BaseDeclIsNonVirtualPrimaryBase &&
(BaseDecl->getNumVBases() || BaseDeclIsMorallyVirtual)) {
// Add the vtable pointer.
- AddVTablePointer(BaseSubobject(BaseDecl, BaseOffset), VTable, VTableClass,
- AddressPoints);
+ AddVTablePointer(BaseSubobject(BaseDecl, BaseOffset), VTable,
+ VTableClass, AddressPoints);
}
// And lay out the secondary virtual pointers for the base class.
@@ -316,8 +329,8 @@ void VTTBuilder::LayoutVirtualVTTs(const CXXRecordDecl *RD,
if (!VBases.insert(BaseDecl))
continue;
- uint64_t BaseOffset =
- MostDerivedClassLayout.getVBaseClassOffsetInBits(BaseDecl);
+ CharUnits BaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/true);
}
@@ -370,7 +383,7 @@ void
CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
llvm::GlobalVariable::LinkageTypes Linkage,
const CXXRecordDecl *RD) {
- VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/true);
+ VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/true, Linkage);
const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
const llvm::ArrayType *ArrayType =
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index 891697f..581467c 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -43,15 +43,16 @@ struct BaseOffset {
/// (Or the offset from the virtual base class to the base class, if the
/// path from the derived class to the base class involves a virtual base
/// class.
- int64_t NonVirtualOffset;
+ CharUnits NonVirtualOffset;
- BaseOffset() : DerivedClass(0), VirtualBase(0), NonVirtualOffset(0) { }
+ BaseOffset() : DerivedClass(0), VirtualBase(0),
+ NonVirtualOffset(CharUnits::Zero()) { }
BaseOffset(const CXXRecordDecl *DerivedClass,
- const CXXRecordDecl *VirtualBase, int64_t NonVirtualOffset)
+ const CXXRecordDecl *VirtualBase, CharUnits NonVirtualOffset)
: DerivedClass(DerivedClass), VirtualBase(VirtualBase),
NonVirtualOffset(NonVirtualOffset) { }
- bool isEmpty() const { return !NonVirtualOffset && !VirtualBase; }
+ bool isEmpty() const { return NonVirtualOffset.isZero() && !VirtualBase; }
};
/// FinalOverriders - Contains the final overrider member functions for all
@@ -64,9 +65,9 @@ public:
const CXXMethodDecl *Method;
/// Offset - the base offset of the overrider in the layout class.
- uint64_t Offset;
+ CharUnits Offset;
- OverriderInfo() : Method(0), Offset(0) { }
+ OverriderInfo() : Method(0), Offset(CharUnits::Zero()) { }
};
private:
@@ -77,7 +78,7 @@ private:
/// MostDerivedClassOffset - If we're building final overriders for a
/// construction vtable, this holds the offset from the layout class to the
/// most derived class.
- const uint64_t MostDerivedClassOffset;
+ const CharUnits MostDerivedClassOffset;
/// LayoutClass - The class we're using for layout information. Will be
/// different than the most derived class if the final overriders are for a
@@ -91,7 +92,7 @@ private:
/// MethodBaseOffsetPairTy - Uniquely identifies a member function
/// in a base subobject.
- typedef std::pair<const CXXMethodDecl *, uint64_t> MethodBaseOffsetPairTy;
+ typedef std::pair<const CXXMethodDecl *, CharUnits> MethodBaseOffsetPairTy;
typedef llvm::DenseMap<MethodBaseOffsetPairTy,
OverriderInfo> OverridersMapTy;
@@ -104,14 +105,14 @@ private:
/// as a record decl and a subobject number) and its offsets in the most
/// derived class as well as the layout class.
typedef llvm::DenseMap<std::pair<const CXXRecordDecl *, unsigned>,
- uint64_t> SubobjectOffsetMapTy;
+ CharUnits> SubobjectOffsetMapTy;
typedef llvm::DenseMap<const CXXRecordDecl *, unsigned> SubobjectCountMapTy;
/// ComputeBaseOffsets - Compute the offsets for all base subobjects of the
/// given base.
void ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
- uint64_t OffsetInLayoutClass,
+ CharUnits OffsetInLayoutClass,
SubobjectOffsetMapTy &SubobjectOffsets,
SubobjectOffsetMapTy &SubobjectLayoutClassOffsets,
SubobjectCountMapTy &SubobjectCounts);
@@ -125,13 +126,13 @@ private:
public:
FinalOverriders(const CXXRecordDecl *MostDerivedClass,
- uint64_t MostDerivedClassOffset,
+ CharUnits MostDerivedClassOffset,
const CXXRecordDecl *LayoutClass);
/// getOverrider - Get the final overrider for the given method declaration in
/// the subobject with the given base offset.
OverriderInfo getOverrider(const CXXMethodDecl *MD,
- uint64_t BaseOffset) const {
+ CharUnits BaseOffset) const {
assert(OverridersMap.count(std::make_pair(MD, BaseOffset)) &&
"Did not find overrider!");
@@ -141,7 +142,8 @@ public:
/// dump - dump the final overriders.
void dump() {
VisitedVirtualBasesSetTy VisitedVirtualBases;
- dump(llvm::errs(), BaseSubobject(MostDerivedClass, 0), VisitedVirtualBases);
+ dump(llvm::errs(), BaseSubobject(MostDerivedClass, CharUnits::Zero()),
+ VisitedVirtualBases);
}
};
@@ -149,7 +151,7 @@ public:
#define DUMP_OVERRIDERS 0
FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass,
- uint64_t MostDerivedClassOffset,
+ CharUnits MostDerivedClassOffset,
const CXXRecordDecl *LayoutClass)
: MostDerivedClass(MostDerivedClass),
MostDerivedClassOffset(MostDerivedClassOffset), LayoutClass(LayoutClass),
@@ -160,9 +162,11 @@ FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass,
SubobjectOffsetMapTy SubobjectOffsets;
SubobjectOffsetMapTy SubobjectLayoutClassOffsets;
SubobjectCountMapTy SubobjectCounts;
- ComputeBaseOffsets(BaseSubobject(MostDerivedClass, 0), /*IsVirtual=*/false,
- MostDerivedClassOffset, SubobjectOffsets,
- SubobjectLayoutClassOffsets, SubobjectCounts);
+ ComputeBaseOffsets(BaseSubobject(MostDerivedClass, CharUnits::Zero()),
+ /*IsVirtual=*/false,
+ MostDerivedClassOffset,
+ SubobjectOffsets, SubobjectLayoutClassOffsets,
+ SubobjectCounts);
  // Get the final overriders.
CXXFinalOverriderMap FinalOverriders;
@@ -180,7 +184,7 @@ FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass,
SubobjectNumber)) &&
"Did not find subobject offset!");
- uint64_t BaseOffset = SubobjectOffsets[std::make_pair(MD->getParent(),
+ CharUnits BaseOffset = SubobjectOffsets[std::make_pair(MD->getParent(),
SubobjectNumber)];
assert(I->second.size() == 1 && "Final overrider is not unique!");
@@ -190,7 +194,7 @@ FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass,
assert(SubobjectLayoutClassOffsets.count(
std::make_pair(OverriderRD, Method.Subobject))
&& "Did not find subobject offset!");
- uint64_t OverriderOffset =
+ CharUnits OverriderOffset =
SubobjectLayoutClassOffsets[std::make_pair(OverriderRD,
Method.Subobject)];
@@ -211,7 +215,7 @@ FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass,
static BaseOffset ComputeBaseOffset(ASTContext &Context,
const CXXRecordDecl *DerivedRD,
const CXXBasePath &Path) {
- int64_t NonVirtualOffset = 0;
+ CharUnits NonVirtualOffset = CharUnits::Zero();
unsigned NonVirtualStart = 0;
const CXXRecordDecl *VirtualBase = 0;
@@ -240,13 +244,13 @@ static BaseOffset ComputeBaseOffset(ASTContext &Context,
const RecordType *BaseType = Element.Base->getType()->getAs<RecordType>();
const CXXRecordDecl *Base = cast<CXXRecordDecl>(BaseType->getDecl());
- NonVirtualOffset += Layout.getBaseClassOffsetInBits(Base);
+ NonVirtualOffset += Layout.getBaseClassOffset(Base);
}
// FIXME: This should probably use CharUnits or something. Maybe we should
// even change the base offsets in ASTRecordLayout to be specified in
// CharUnits.
- return BaseOffset(DerivedRD, VirtualBase, NonVirtualOffset / 8);
+ return BaseOffset(DerivedRD, VirtualBase, NonVirtualOffset);
}
@@ -321,7 +325,7 @@ ComputeReturnAdjustmentBaseOffset(ASTContext &Context,
void
FinalOverriders::ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
- uint64_t OffsetInLayoutClass,
+ CharUnits OffsetInLayoutClass,
SubobjectOffsetMapTy &SubobjectOffsets,
SubobjectOffsetMapTy &SubobjectLayoutClassOffsets,
SubobjectCountMapTy &SubobjectCounts) {
@@ -337,8 +341,7 @@ FinalOverriders::ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
assert(!SubobjectLayoutClassOffsets.count(std::make_pair(RD, SubobjectNumber))
&& "Subobject offset already exists!");
- SubobjectOffsets[std::make_pair(RD, SubobjectNumber)] =
- Base.getBaseOffset();
+ SubobjectOffsets[std::make_pair(RD, SubobjectNumber)] = Base.getBaseOffset();
SubobjectLayoutClassOffsets[std::make_pair(RD, SubobjectNumber)] =
OffsetInLayoutClass;
@@ -348,8 +351,8 @@ FinalOverriders::ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
- uint64_t BaseOffset;
- uint64_t BaseOffsetInLayoutClass;
+ CharUnits BaseOffset;
+ CharUnits BaseOffsetInLayoutClass;
if (I->isVirtual()) {
// Check if we've visited this virtual base before.
if (SubobjectOffsets.count(std::make_pair(BaseDecl, 0)))
@@ -358,20 +361,21 @@ FinalOverriders::ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
const ASTRecordLayout &LayoutClassLayout =
Context.getASTRecordLayout(LayoutClass);
- BaseOffset = MostDerivedClassLayout.getVBaseClassOffsetInBits(BaseDecl);
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
BaseOffsetInLayoutClass =
- LayoutClassLayout.getVBaseClassOffsetInBits(BaseDecl);
+ LayoutClassLayout.getVBaseClassOffset(BaseDecl);
} else {
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- uint64_t Offset = Layout.getBaseClassOffsetInBits(BaseDecl);
+ CharUnits Offset = Layout.getBaseClassOffset(BaseDecl);
BaseOffset = Base.getBaseOffset() + Offset;
BaseOffsetInLayoutClass = OffsetInLayoutClass + Offset;
}
- ComputeBaseOffsets(BaseSubobject(BaseDecl, BaseOffset), I->isVirtual(),
- BaseOffsetInLayoutClass, SubobjectOffsets,
- SubobjectLayoutClassOffsets, SubobjectCounts);
+ ComputeBaseOffsets(BaseSubobject(BaseDecl, BaseOffset),
+ I->isVirtual(), BaseOffsetInLayoutClass,
+ SubobjectOffsets, SubobjectLayoutClassOffsets,
+ SubobjectCounts);
}
}
@@ -389,24 +393,23 @@ void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base,
if (!BaseDecl->isPolymorphic())
continue;
- uint64_t BaseOffset;
+ CharUnits BaseOffset;
if (I->isVirtual()) {
if (!VisitedVirtualBases.insert(BaseDecl)) {
// We've visited this base before.
continue;
}
- BaseOffset = MostDerivedClassLayout.getVBaseClassOffsetInBits(BaseDecl);
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
} else {
- BaseOffset = Layout.getBaseClassOffsetInBits(BaseDecl) +
- Base.getBaseOffset();
+ BaseOffset = Layout.getBaseClassOffset(BaseDecl) + Base.getBaseOffset();
}
dump(Out, BaseSubobject(BaseDecl, BaseOffset), VisitedVirtualBases);
}
Out << "Final overriders for (" << RD->getQualifiedNameAsString() << ", ";
- Out << Base.getBaseOffset() / 8 << ")\n";
+ Out << Base.getBaseOffset().getQuantity() << ")\n";
// Now dump the overriders for this base subobject.
for (CXXRecordDecl::method_iterator I = RD->method_begin(),
@@ -420,7 +423,7 @@ void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base,
Out << " " << MD->getQualifiedNameAsString() << " - (";
Out << Overrider.Method->getQualifiedNameAsString();
- Out << ", " << ", " << Overrider.Offset / 8 << ')';
+    Out << ", " << Overrider.Offset.getQuantity() << ')';
BaseOffset Offset;
if (!Overrider.Method->isPure())
@@ -431,7 +434,7 @@ void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base,
if (Offset.VirtualBase)
Out << Offset.VirtualBase->getQualifiedNameAsString() << " vbase, ";
- Out << Offset.NonVirtualOffset << " nv]";
+ Out << Offset.NonVirtualOffset.getQuantity() << " nv]";
}
Out << "\n";
@@ -460,15 +463,15 @@ public:
CK_UnusedFunctionPointer
};
- static VTableComponent MakeVCallOffset(int64_t Offset) {
+ static VTableComponent MakeVCallOffset(CharUnits Offset) {
return VTableComponent(CK_VCallOffset, Offset);
}
- static VTableComponent MakeVBaseOffset(int64_t Offset) {
+ static VTableComponent MakeVBaseOffset(CharUnits Offset) {
return VTableComponent(CK_VBaseOffset, Offset);
}
- static VTableComponent MakeOffsetToTop(int64_t Offset) {
+ static VTableComponent MakeOffsetToTop(CharUnits Offset) {
return VTableComponent(CK_OffsetToTop, Offset);
}
@@ -510,19 +513,19 @@ public:
return (Kind)(Value & 0x7);
}
- int64_t getVCallOffset() const {
+ CharUnits getVCallOffset() const {
assert(getKind() == CK_VCallOffset && "Invalid component kind!");
return getOffset();
}
- int64_t getVBaseOffset() const {
+ CharUnits getVBaseOffset() const {
assert(getKind() == CK_VBaseOffset && "Invalid component kind!");
return getOffset();
}
- int64_t getOffsetToTop() const {
+ CharUnits getOffsetToTop() const {
assert(getKind() == CK_OffsetToTop && "Invalid component kind!");
return getOffset();
@@ -554,13 +557,13 @@ public:
}
private:
- VTableComponent(Kind ComponentKind, int64_t Offset) {
+ VTableComponent(Kind ComponentKind, CharUnits Offset) {
assert((ComponentKind == CK_VCallOffset ||
ComponentKind == CK_VBaseOffset ||
ComponentKind == CK_OffsetToTop) && "Invalid component kind!");
- assert(Offset <= ((1LL << 56) - 1) && "Offset is too big!");
+ assert(Offset.getQuantity() <= ((1LL << 56) - 1) && "Offset is too big!");
- Value = ((Offset << 3) | ComponentKind);
+ Value = ((Offset.getQuantity() << 3) | ComponentKind);
}
VTableComponent(Kind ComponentKind, uintptr_t Ptr) {
@@ -576,11 +579,11 @@ private:
Value = Ptr | ComponentKind;
}
- int64_t getOffset() const {
+ CharUnits getOffset() const {
assert((getKind() == CK_VCallOffset || getKind() == CK_VBaseOffset ||
getKind() == CK_OffsetToTop) && "Invalid component kind!");
- return Value >> 3;
+ return CharUnits::fromQuantity(Value >> 3);
}
uintptr_t getPointer() const {
@@ -608,7 +611,7 @@ private:
/// VCallOffsetMap - Keeps track of vcall offsets when building a vtable.
struct VCallOffsetMap {
- typedef std::pair<const CXXMethodDecl *, int64_t> MethodAndOffsetPairTy;
+ typedef std::pair<const CXXMethodDecl *, CharUnits> MethodAndOffsetPairTy;
/// Offsets - Keeps track of methods and their offsets.
// FIXME: This should be a real map and not a vector.
@@ -623,11 +626,11 @@ public:
/// AddVCallOffset - Adds a vcall offset to the map. Returns true if the
/// add was successful, or false if there was already a member function with
/// the same signature in the map.
- bool AddVCallOffset(const CXXMethodDecl *MD, int64_t OffsetOffset);
+ bool AddVCallOffset(const CXXMethodDecl *MD, CharUnits OffsetOffset);
/// getVCallOffsetOffset - Returns the vcall offset offset (relative to the
/// vtable address point) for the given virtual member function.
- int64_t getVCallOffsetOffset(const CXXMethodDecl *MD);
+ CharUnits getVCallOffsetOffset(const CXXMethodDecl *MD);
// empty - Return whether the offset map is empty or not.
bool empty() const { return Offsets.empty(); }
@@ -677,7 +680,7 @@ bool VCallOffsetMap::MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
}
bool VCallOffsetMap::AddVCallOffset(const CXXMethodDecl *MD,
- int64_t OffsetOffset) {
+ CharUnits OffsetOffset) {
// Check if we can reuse an offset.
for (unsigned I = 0, E = Offsets.size(); I != E; ++I) {
if (MethodsCanShareVCallOffset(Offsets[I].first, MD))
@@ -689,7 +692,7 @@ bool VCallOffsetMap::AddVCallOffset(const CXXMethodDecl *MD,
return true;
}
-int64_t VCallOffsetMap::getVCallOffsetOffset(const CXXMethodDecl *MD) {
+CharUnits VCallOffsetMap::getVCallOffsetOffset(const CXXMethodDecl *MD) {
// Look for an offset.
for (unsigned I = 0, E = Offsets.size(); I != E; ++I) {
if (MethodsCanShareVCallOffset(Offsets[I].first, MD))
@@ -697,13 +700,13 @@ int64_t VCallOffsetMap::getVCallOffsetOffset(const CXXMethodDecl *MD) {
}
assert(false && "Should always find a vcall offset offset!");
- return 0;
+ return CharUnits::Zero();
}
/// VCallAndVBaseOffsetBuilder - Class for building vcall and vbase offsets.
class VCallAndVBaseOffsetBuilder {
public:
- typedef llvm::DenseMap<const CXXRecordDecl *, int64_t>
+ typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits>
VBaseOffsetOffsetsMapTy;
private:
@@ -741,24 +744,25 @@ private:
/// AddVCallAndVBaseOffsets - Add vcall offsets and vbase offsets for the
/// given base subobject.
void AddVCallAndVBaseOffsets(BaseSubobject Base, bool BaseIsVirtual,
- uint64_t RealBaseOffset);
+ CharUnits RealBaseOffset);
/// AddVCallOffsets - Add vcall offsets for the given base subobject.
- void AddVCallOffsets(BaseSubobject Base, uint64_t VBaseOffset);
+ void AddVCallOffsets(BaseSubobject Base, CharUnits VBaseOffset);
/// AddVBaseOffsets - Add vbase offsets for the given class.
- void AddVBaseOffsets(const CXXRecordDecl *Base, uint64_t OffsetInLayoutClass);
+ void AddVBaseOffsets(const CXXRecordDecl *Base,
+ CharUnits OffsetInLayoutClass);
/// getCurrentOffsetOffset - Get the current vcall or vbase offset offset in
- /// bytes, relative to the vtable address point.
- int64_t getCurrentOffsetOffset() const;
+ /// chars, relative to the vtable address point.
+ CharUnits getCurrentOffsetOffset() const;
public:
VCallAndVBaseOffsetBuilder(const CXXRecordDecl *MostDerivedClass,
const CXXRecordDecl *LayoutClass,
const FinalOverriders *Overriders,
BaseSubobject Base, bool BaseIsVirtual,
- uint64_t OffsetInLayoutClass)
+ CharUnits OffsetInLayoutClass)
: MostDerivedClass(MostDerivedClass), LayoutClass(LayoutClass),
Context(MostDerivedClass->getASTContext()), Overriders(Overriders) {
@@ -780,7 +784,7 @@ public:
void
VCallAndVBaseOffsetBuilder::AddVCallAndVBaseOffsets(BaseSubobject Base,
bool BaseIsVirtual,
- uint64_t RealBaseOffset) {
+ CharUnits RealBaseOffset) {
const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base.getBase());
// Itanium C++ ABI 2.5.2:
@@ -795,7 +799,7 @@ VCallAndVBaseOffsetBuilder::AddVCallAndVBaseOffsets(BaseSubobject Base,
if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
bool PrimaryBaseIsVirtual = Layout.isPrimaryBaseVirtual();
- uint64_t PrimaryBaseOffset;
+ CharUnits PrimaryBaseOffset;
// Get the base offset of the primary base.
if (PrimaryBaseIsVirtual) {
@@ -806,7 +810,7 @@ VCallAndVBaseOffsetBuilder::AddVCallAndVBaseOffsets(BaseSubobject Base,
Context.getASTRecordLayout(MostDerivedClass);
PrimaryBaseOffset =
- MostDerivedClassLayout.getVBaseClassOffsetInBits(PrimaryBase);
+ MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
} else {
assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
"Primary base should have a zero offset!");
@@ -814,8 +818,9 @@ VCallAndVBaseOffsetBuilder::AddVCallAndVBaseOffsets(BaseSubobject Base,
PrimaryBaseOffset = Base.getBaseOffset();
}
- AddVCallAndVBaseOffsets(BaseSubobject(PrimaryBase, PrimaryBaseOffset),
- PrimaryBaseIsVirtual, RealBaseOffset);
+ AddVCallAndVBaseOffsets(
+ BaseSubobject(PrimaryBase,PrimaryBaseOffset),
+ PrimaryBaseIsVirtual, RealBaseOffset);
}
AddVBaseOffsets(Base.getBase(), RealBaseOffset);
@@ -825,22 +830,21 @@ VCallAndVBaseOffsetBuilder::AddVCallAndVBaseOffsets(BaseSubobject Base,
AddVCallOffsets(Base, RealBaseOffset);
}
-int64_t VCallAndVBaseOffsetBuilder::getCurrentOffsetOffset() const {
+CharUnits VCallAndVBaseOffsetBuilder::getCurrentOffsetOffset() const {
// OffsetIndex is the index of this vcall or vbase offset, relative to the
// vtable address point. (We subtract 3 to account for the information just
// above the address point, the RTTI info, the offset to top, and the
// vcall offset itself).
int64_t OffsetIndex = -(int64_t)(3 + Components.size());
- // FIXME: We shouldn't use / 8 here.
- int64_t OffsetOffset = OffsetIndex *
- (int64_t)Context.Target.getPointerWidth(0) / 8;
-
+ CharUnits PointerWidth =
+ Context.toCharUnitsFromBits(Context.Target.getPointerWidth(0));
+ CharUnits OffsetOffset = PointerWidth * OffsetIndex;
return OffsetOffset;
}
void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
- uint64_t VBaseOffset) {
+ CharUnits VBaseOffset) {
const CXXRecordDecl *RD = Base.getBase();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
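
getCurrentOffsetOffset above now returns CharUnits directly: the Nth vcall or
vbase component lands 3 + N pointer widths below the vtable address point,
accounting for the RTTI entry, the offset-to-top, and the slot being allocated,
as the comment in the hunk notes. A worked check for a 64-bit target
(offsetOffset is an illustrative stand-in, not the clang function):

    #include <cassert>
    #include <cstdint>

    // Mirrors the computation: index is -(3 + Components.size()), scaled by
    // the pointer width expressed in chars (8 on a 64-bit target).
    int64_t offsetOffset(unsigned componentsSoFar, int64_t pointerWidthChars) {
      int64_t OffsetIndex = -(int64_t)(3 + componentsSoFar);
      return OffsetIndex * pointerWidthChars;  // a CharUnits quantity
    }

    int main() {
      assert(offsetOffset(0, 8) == -24);  // first vcall/vbase offset slot
      assert(offsetOffset(1, 8) == -32);  // each later slot one pointer lower
    }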
@@ -866,14 +870,14 @@ void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
if (!MD->isVirtual())
continue;
- int64_t OffsetOffset = getCurrentOffsetOffset();
+ CharUnits OffsetOffset = getCurrentOffsetOffset();
// Don't add a vcall offset if we already have one for this member function
// signature.
if (!VCallOffsets.AddVCallOffset(MD, OffsetOffset))
continue;
- int64_t Offset = 0;
+ CharUnits Offset = CharUnits::Zero();
if (Overriders) {
// Get the final overrider.
@@ -882,11 +886,11 @@ void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
/// The vcall offset is the offset from the virtual base to the object
/// where the function was overridden.
- // FIXME: We should not use / 8 here.
- Offset = (int64_t)(Overrider.Offset - VBaseOffset) / 8;
+ Offset = Overrider.Offset - VBaseOffset;
}
- Components.push_back(VTableComponent::MakeVCallOffset(Offset));
+ Components.push_back(
+ VTableComponent::MakeVCallOffset(Offset));
}
// And iterate over all non-virtual bases (ignoring the primary base).
@@ -902,15 +906,17 @@ void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
continue;
// Get the base offset of this base.
- uint64_t BaseOffset = Base.getBaseOffset() +
- Layout.getBaseClassOffsetInBits(BaseDecl);
+ CharUnits BaseOffset = Base.getBaseOffset() +
+ Layout.getBaseClassOffset(BaseDecl);
- AddVCallOffsets(BaseSubobject(BaseDecl, BaseOffset), VBaseOffset);
+ AddVCallOffsets(BaseSubobject(BaseDecl, BaseOffset),
+ VBaseOffset);
}
}
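A hypothetical hierarchy showing what these vcall offsets buy:

struct V { virtual void f(); };
struct C : virtual V {
  virtual void f();  // final overrider of V::f
};
// A call to f() through a V* pointing into a C object must slide 'this'
// from the V subobject to the C object.  That delta, Overrider.Offset -
// VBaseOffset above (now plain CharUnits subtraction instead of a bit
// difference divided by 8), is what lands in the vcall-offset slot.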
-void VCallAndVBaseOffsetBuilder::AddVBaseOffsets(const CXXRecordDecl *RD,
- uint64_t OffsetInLayoutClass) {
+void
+VCallAndVBaseOffsetBuilder::AddVBaseOffsets(const CXXRecordDecl *RD,
+ CharUnits OffsetInLayoutClass) {
const ASTRecordLayout &LayoutClassLayout =
Context.getASTRecordLayout(LayoutClass);
@@ -922,19 +928,19 @@ void VCallAndVBaseOffsetBuilder::AddVBaseOffsets(const CXXRecordDecl *RD,
// Check if this is a virtual base that we haven't visited before.
if (I->isVirtual() && VisitedVirtualBases.insert(BaseDecl)) {
- // FIXME: We shouldn't use / 8 here.
- int64_t Offset =
- (int64_t)(LayoutClassLayout.getVBaseClassOffsetInBits(BaseDecl) -
- OffsetInLayoutClass) / 8;
+ CharUnits Offset =
+ LayoutClassLayout.getVBaseClassOffset(BaseDecl) - OffsetInLayoutClass;
// Add the vbase offset offset.
assert(!VBaseOffsetOffsets.count(BaseDecl) &&
"vbase offset offset already exists!");
- int64_t VBaseOffsetOffset = getCurrentOffsetOffset();
- VBaseOffsetOffsets.insert(std::make_pair(BaseDecl, VBaseOffsetOffset));
+ CharUnits VBaseOffsetOffset = getCurrentOffsetOffset();
+ VBaseOffsetOffsets.insert(
+ std::make_pair(BaseDecl, VBaseOffsetOffset));
- Components.push_back(VTableComponent::MakeVBaseOffset(Offset));
+ Components.push_back(
+ VTableComponent::MakeVBaseOffset(Offset));
}
// Check the base class looking for more vbase offsets.
@@ -950,7 +956,7 @@ public:
typedef llvm::SmallSetVector<const CXXRecordDecl *, 8>
PrimaryBasesSetVectorTy;
- typedef llvm::DenseMap<const CXXRecordDecl *, int64_t>
+ typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits>
VBaseOffsetOffsetsMapTy;
typedef llvm::DenseMap<BaseSubobject, uint64_t>
@@ -966,7 +972,7 @@ private:
/// MostDerivedClassOffset - If we're building a construction vtable, this
/// holds the offset from the layout class to the most derived class.
- const uint64_t MostDerivedClassOffset;
+ const CharUnits MostDerivedClassOffset;
/// MostDerivedClassIsVirtual - Whether the most derived class is a virtual
/// base. (This only makes sense when building a construction vtable).
@@ -1001,23 +1007,26 @@ private:
  /// (Used for computing 'this' pointer adjustment thunks.)
struct MethodInfo {
/// BaseOffset - The base offset of this method.
- const uint64_t BaseOffset;
+ const CharUnits BaseOffset;
/// BaseOffsetInLayoutClass - The base offset in the layout class of this
/// method.
- const uint64_t BaseOffsetInLayoutClass;
+ const CharUnits BaseOffsetInLayoutClass;
/// VTableIndex - The index in the vtable that this method has.
/// (For destructors, this is the index of the complete destructor).
const uint64_t VTableIndex;
- MethodInfo(uint64_t BaseOffset, uint64_t BaseOffsetInLayoutClass,
+ MethodInfo(CharUnits BaseOffset, CharUnits BaseOffsetInLayoutClass,
uint64_t VTableIndex)
: BaseOffset(BaseOffset),
BaseOffsetInLayoutClass(BaseOffsetInLayoutClass),
VTableIndex(VTableIndex) { }
- MethodInfo() : BaseOffset(0), BaseOffsetInLayoutClass(0), VTableIndex(0) { }
+ MethodInfo()
+ : BaseOffset(CharUnits::Zero()),
+ BaseOffsetInLayoutClass(CharUnits::Zero()),
+ VTableIndex(0) { }
};
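MethodInfo's default constructor has to spell out CharUnits::Zero() because CharUnits deliberately has no implicit conversion from raw integers. A small sketch of the CharUnits operations this patch leans on (the real clang API, trimmed down):

#include "clang/AST/CharUnits.h"
#include <stdint.h>

clang::CharUnits charUnitsDemo() {
  clang::CharUnits a = clang::CharUnits::fromQuantity(16);  // 16 chars
  clang::CharUnits b = clang::CharUnits::Zero();
  clang::CharUnits c = a - b;        // arithmetic stays in typed chars
  int64_t raw = c.getQuantity();     // explicit escape hatch to int64_t
  return clang::CharUnits::fromQuantity(raw);
}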
typedef llvm::DenseMap<const CXXMethodDecl *, MethodInfo> MethodInfoMapTy;
@@ -1066,7 +1075,7 @@ private:
/// final overrider.
ThisAdjustment
ComputeThisAdjustment(const CXXMethodDecl *MD,
- uint64_t BaseOffsetInLayoutClass,
+ CharUnits BaseOffsetInLayoutClass,
FinalOverriders::OverriderInfo Overrider);
/// AddMethod - Add a single virtual member function to the vtable
@@ -1093,16 +1102,16 @@ private:
/// C-in-D's copy of A's vtable is never referenced, so this is not
/// necessary.
bool IsOverriderUsed(const CXXMethodDecl *Overrider,
- uint64_t BaseOffsetInLayoutClass,
+ CharUnits BaseOffsetInLayoutClass,
const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
- uint64_t FirstBaseOffsetInLayoutClass) const;
+ CharUnits FirstBaseOffsetInLayoutClass) const;
/// AddMethods - Add the methods of this base subobject and all its
/// primary bases to the vtable components vector.
- void AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
+ void AddMethods(BaseSubobject Base, CharUnits BaseOffsetInLayoutClass,
const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
- uint64_t FirstBaseOffsetInLayoutClass,
+ CharUnits FirstBaseOffsetInLayoutClass,
PrimaryBasesSetVectorTy &PrimaryBases);
// LayoutVTable - Layout the vtable for the given base class, including its
@@ -1120,7 +1129,7 @@ private:
void LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
bool BaseIsMorallyVirtual,
bool BaseIsVirtualInLayoutClass,
- uint64_t OffsetInLayoutClass);
+ CharUnits OffsetInLayoutClass);
/// LayoutSecondaryVTables - Layout the secondary vtables for the given base
/// subobject.
@@ -1128,12 +1137,12 @@ private:
/// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
/// or a direct or indirect base of a virtual base.
void LayoutSecondaryVTables(BaseSubobject Base, bool BaseIsMorallyVirtual,
- uint64_t OffsetInLayoutClass);
+ CharUnits OffsetInLayoutClass);
/// DeterminePrimaryVirtualBases - Determine the primary virtual bases in this
/// class hierarchy.
void DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
- uint64_t OffsetInLayoutClass,
+ CharUnits OffsetInLayoutClass,
VisitedVirtualBasesSetTy &VBases);
/// LayoutVTablesForVirtualBases - Layout vtables for all virtual bases of the
@@ -1149,8 +1158,9 @@ private:
public:
VTableBuilder(CodeGenVTables &VTables, const CXXRecordDecl *MostDerivedClass,
- uint64_t MostDerivedClassOffset, bool MostDerivedClassIsVirtual,
- const CXXRecordDecl *LayoutClass)
+ CharUnits MostDerivedClassOffset,
+                bool MostDerivedClassIsVirtual,
+                const CXXRecordDecl *LayoutClass)
: VTables(VTables), MostDerivedClass(MostDerivedClass),
MostDerivedClassOffset(MostDerivedClassOffset),
MostDerivedClassIsVirtual(MostDerivedClassIsVirtual),
@@ -1325,15 +1335,15 @@ ReturnAdjustment VTableBuilder::ComputeReturnAdjustment(BaseOffset Offset) {
if (Offset.DerivedClass == MostDerivedClass) {
// We can get the offset offset directly from our map.
Adjustment.VBaseOffsetOffset =
- VBaseOffsetOffsets.lookup(Offset.VirtualBase);
+ VBaseOffsetOffsets.lookup(Offset.VirtualBase).getQuantity();
} else {
Adjustment.VBaseOffsetOffset =
VTables.getVirtualBaseOffsetOffset(Offset.DerivedClass,
- Offset.VirtualBase);
+ Offset.VirtualBase).getQuantity();
}
}
- Adjustment.NonVirtual = Offset.NonVirtualOffset;
+ Adjustment.NonVirtual = Offset.NonVirtualOffset.getQuantity();
}
return Adjustment;
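To make the virtual-base case concrete, here is a hypothetical hierarchy whose covariant return forces exactly this kind of adjustment:

struct V { virtual ~V(); };
struct A { virtual V *clone(); };
struct B : virtual V, A {
  virtual B *clone();  // covariant return through a virtual base
};
// A caller invoking clone() through an A* expects a V*.  Since V is a
// virtual base of B, the returned B* must be adjusted at run time using
// the vbase offset stored in the vtable -- the VBaseOffsetOffset (now a
// CharUnits quantity) that the code above looks up.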
@@ -1360,8 +1370,7 @@ VTableBuilder::ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
I != E; ++I) {
BaseOffset Offset = ComputeBaseOffset(Context, DerivedRD, *I);
- // FIXME: Should not use * 8 here.
- uint64_t OffsetToBaseSubobject = Offset.NonVirtualOffset * 8;
+ CharUnits OffsetToBaseSubobject = Offset.NonVirtualOffset;
if (Offset.VirtualBase) {
// If we have a virtual base class, the non-virtual offset is relative
@@ -1372,7 +1381,7 @@ VTableBuilder::ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
/// Get the virtual base offset, relative to the most derived class
/// layout.
OffsetToBaseSubobject +=
- LayoutClassLayout.getVBaseClassOffsetInBits(Offset.VirtualBase);
+ LayoutClassLayout.getVBaseClassOffset(Offset.VirtualBase);
} else {
// Otherwise, the non-virtual offset is relative to the derived class
// offset.
@@ -1393,13 +1402,13 @@ VTableBuilder::ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
ThisAdjustment
VTableBuilder::ComputeThisAdjustment(const CXXMethodDecl *MD,
- uint64_t BaseOffsetInLayoutClass,
+ CharUnits BaseOffsetInLayoutClass,
FinalOverriders::OverriderInfo Overrider) {
// Ignore adjustments for pure virtual member functions.
if (Overrider.Method->isPure())
return ThisAdjustment();
- BaseSubobject OverriddenBaseSubobject(MD->getParent(),
+ BaseSubobject OverriddenBaseSubobject(MD->getParent(),
BaseOffsetInLayoutClass);
BaseSubobject OverriderBaseSubobject(Overrider.Method->getParent(),
@@ -1422,18 +1431,21 @@ VTableBuilder::ComputeThisAdjustment(const CXXMethodDecl *MD,
// build them.
VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, MostDerivedClass,
/*FinalOverriders=*/0,
- BaseSubobject(Offset.VirtualBase, 0),
+ BaseSubobject(Offset.VirtualBase,
+ CharUnits::Zero()),
/*BaseIsVirtual=*/true,
- /*OffsetInLayoutClass=*/0);
+ /*OffsetInLayoutClass=*/
+ CharUnits::Zero());
VCallOffsets = Builder.getVCallOffsets();
}
- Adjustment.VCallOffsetOffset = VCallOffsets.getVCallOffsetOffset(MD);
+ Adjustment.VCallOffsetOffset =
+ VCallOffsets.getVCallOffsetOffset(MD).getQuantity();
}
// Set the non-virtual part of the adjustment.
- Adjustment.NonVirtual = Offset.NonVirtualOffset;
+ Adjustment.NonVirtual = Offset.NonVirtualOffset.getQuantity();
return Adjustment;
}
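As an illustration of the non-virtual part, a classic multiple-inheritance case (hypothetical classes):

struct A { virtual void f(); };
struct B { virtual void g(); };
struct C : A, B {
  virtual void g();  // final overrider of B::g
};
// In the B-in-C secondary vtable, the slot for g holds a thunk: a B* points
// at the B subobject somewhere inside C, so 'this' must first be slid back
// to the start of the C object.  That (negative) displacement is what
// Adjustment.NonVirtual now receives via getQuantity().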
@@ -1489,9 +1501,9 @@ OverridesIndirectMethodInBases(const CXXMethodDecl *MD,
bool
VTableBuilder::IsOverriderUsed(const CXXMethodDecl *Overrider,
- uint64_t BaseOffsetInLayoutClass,
+ CharUnits BaseOffsetInLayoutClass,
const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
- uint64_t FirstBaseOffsetInLayoutClass) const {
+ CharUnits FirstBaseOffsetInLayoutClass) const {
// If the base and the first base in the primary base chain have the same
// offsets, then this overrider will be used.
if (BaseOffsetInLayoutClass == FirstBaseOffsetInLayoutClass)
@@ -1529,7 +1541,7 @@ VTableBuilder::IsOverriderUsed(const CXXMethodDecl *Overrider,
// Now check if this is the primary base that is not a primary base in the
// most derived class.
- if (LayoutClassLayout.getVBaseClassOffsetInBits(PrimaryBase) !=
+ if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
FirstBaseOffsetInLayoutClass) {
// We found it, stop walking the chain.
break;
@@ -1576,16 +1588,16 @@ FindNearestOverriddenMethod(const CXXMethodDecl *MD,
}
void
-VTableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
+VTableBuilder::AddMethods(BaseSubobject Base, CharUnits BaseOffsetInLayoutClass,
const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
- uint64_t FirstBaseOffsetInLayoutClass,
+ CharUnits FirstBaseOffsetInLayoutClass,
PrimaryBasesSetVectorTy &PrimaryBases) {
const CXXRecordDecl *RD = Base.getBase();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
- uint64_t PrimaryBaseOffset;
- uint64_t PrimaryBaseOffsetInLayoutClass;
+ CharUnits PrimaryBaseOffset;
+ CharUnits PrimaryBaseOffsetInLayoutClass;
if (Layout.isPrimaryBaseVirtual()) {
assert(Layout.getVBaseClassOffsetInBits(PrimaryBase) == 0 &&
"Primary vbase should have a zero offset!");
@@ -1594,13 +1606,13 @@ VTableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
Context.getASTRecordLayout(MostDerivedClass);
PrimaryBaseOffset =
- MostDerivedClassLayout.getVBaseClassOffsetInBits(PrimaryBase);
+ MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
const ASTRecordLayout &LayoutClassLayout =
Context.getASTRecordLayout(LayoutClass);
PrimaryBaseOffsetInLayoutClass =
- LayoutClassLayout.getVBaseClassOffsetInBits(PrimaryBase);
+ LayoutClassLayout.getVBaseClassOffset(PrimaryBase);
} else {
assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
"Primary base should have a zero offset!");
@@ -1642,8 +1654,7 @@ VTableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
"Did not find the overridden method!");
MethodInfo &OverriddenMethodInfo = MethodInfoMap[OverriddenMD];
- MethodInfo MethodInfo(Base.getBaseOffset(),
- BaseOffsetInLayoutClass,
+ MethodInfo MethodInfo(Base.getBaseOffset(), BaseOffsetInLayoutClass,
OverriddenMethodInfo.VTableIndex);
assert(!MethodInfoMap.count(MD) &&
@@ -1716,7 +1727,8 @@ VTableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
}
void VTableBuilder::LayoutVTable() {
- LayoutPrimaryAndSecondaryVTables(BaseSubobject(MostDerivedClass, 0),
+ LayoutPrimaryAndSecondaryVTables(BaseSubobject(MostDerivedClass,
+ CharUnits::Zero()),
/*BaseIsMorallyVirtual=*/false,
MostDerivedClassIsVirtual,
MostDerivedClassOffset);
@@ -1724,7 +1736,7 @@ void VTableBuilder::LayoutVTable() {
VisitedVirtualBasesSetTy VBases;
// Determine the primary virtual bases.
- DeterminePrimaryVirtualBases(MostDerivedClass, MostDerivedClassOffset,
+ DeterminePrimaryVirtualBases(MostDerivedClass, MostDerivedClassOffset,
VBases);
VBases.clear();
@@ -1735,7 +1747,7 @@ void
VTableBuilder::LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
bool BaseIsMorallyVirtual,
bool BaseIsVirtualInLayoutClass,
- uint64_t OffsetInLayoutClass) {
+ CharUnits OffsetInLayoutClass) {
assert(Base.getBase()->isDynamicClass() && "class does not have a vtable!");
// Add vcall and vbase offsets for this vtable.
@@ -1758,10 +1770,9 @@ VTableBuilder::LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
VBaseOffsetOffsets = Builder.getVBaseOffsetOffsets();
// Add the offset to top.
- // FIXME: We should not use / 8 here.
- int64_t OffsetToTop = -(int64_t)(OffsetInLayoutClass -
- MostDerivedClassOffset) / 8;
- Components.push_back(VTableComponent::MakeOffsetToTop(OffsetToTop));
+ CharUnits OffsetToTop = MostDerivedClassOffset - OffsetInLayoutClass;
+ Components.push_back(
+ VTableComponent::MakeOffsetToTop(OffsetToTop));
// Next, add the RTTI.
Components.push_back(VTableComponent::MakeRTTI(MostDerivedClass));
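A quick worked example of the sign convention, assuming A-in-C at offset 0 and B-in-C at offset 8 on an LP64 target (illustrative layout):

struct A { virtual void f(); };
struct B { virtual void g(); };
struct C : A, B { };
// Primary vtable (A-in-C): offset-to-top = 0 - 0 = 0.
// Secondary vtable (B-in-C): offset-to-top = 0 - 8 = -8, i.e.
// MostDerivedClassOffset minus OffsetInLayoutClass, which is how
// dynamic_cast<void*> recovers the complete object from a B*.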
@@ -1770,7 +1781,8 @@ VTableBuilder::LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
// Now go through all virtual member functions and add them.
PrimaryBasesSetVectorTy PrimaryBases;
- AddMethods(Base, OffsetInLayoutClass, Base.getBase(), OffsetInLayoutClass,
+ AddMethods(Base, OffsetInLayoutClass,
+ Base.getBase(), OffsetInLayoutClass,
PrimaryBases);
// Compute 'this' pointer adjustments.
@@ -1779,8 +1791,9 @@ VTableBuilder::LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
// Add all address points.
const CXXRecordDecl *RD = Base.getBase();
while (true) {
- AddressPoints.insert(std::make_pair(BaseSubobject(RD, OffsetInLayoutClass),
- AddressPoint));
+ AddressPoints.insert(std::make_pair(
+ BaseSubobject(RD, OffsetInLayoutClass),
+ AddressPoint));
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
@@ -1794,7 +1807,7 @@ VTableBuilder::LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
const ASTRecordLayout &LayoutClassLayout =
Context.getASTRecordLayout(LayoutClass);
- if (LayoutClassLayout.getVBaseClassOffsetInBits(PrimaryBase) !=
+ if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
OffsetInLayoutClass) {
// We don't want to add this class (or any of its primary bases).
break;
@@ -1810,7 +1823,7 @@ VTableBuilder::LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
void VTableBuilder::LayoutSecondaryVTables(BaseSubobject Base,
bool BaseIsMorallyVirtual,
- uint64_t OffsetInLayoutClass) {
+ CharUnits OffsetInLayoutClass) {
// Itanium C++ ABI 2.5.2:
// Following the primary virtual table of a derived class are secondary
// virtual tables for each of its proper base classes, except any primary
@@ -1844,10 +1857,11 @@ void VTableBuilder::LayoutSecondaryVTables(BaseSubobject Base,
}
// Get the base offset of this base.
- uint64_t RelativeBaseOffset = Layout.getBaseClassOffsetInBits(BaseDecl);
- uint64_t BaseOffset = Base.getBaseOffset() + RelativeBaseOffset;
+ CharUnits RelativeBaseOffset = Layout.getBaseClassOffset(BaseDecl);
+ CharUnits BaseOffset = Base.getBaseOffset() + RelativeBaseOffset;
- uint64_t BaseOffsetInLayoutClass = OffsetInLayoutClass + RelativeBaseOffset;
+ CharUnits BaseOffsetInLayoutClass =
+ OffsetInLayoutClass + RelativeBaseOffset;
// Don't emit a secondary vtable for a primary base. We might however want
// to emit secondary vtables for other bases of this base.
@@ -1858,16 +1872,17 @@ void VTableBuilder::LayoutSecondaryVTables(BaseSubobject Base,
}
// Layout the primary vtable (and any secondary vtables) for this base.
- LayoutPrimaryAndSecondaryVTables(BaseSubobject(BaseDecl, BaseOffset),
- BaseIsMorallyVirtual,
- /*BaseIsVirtualInLayoutClass=*/false,
- BaseOffsetInLayoutClass);
+ LayoutPrimaryAndSecondaryVTables(
+ BaseSubobject(BaseDecl, BaseOffset),
+ BaseIsMorallyVirtual,
+ /*BaseIsVirtualInLayoutClass=*/false,
+ BaseOffsetInLayoutClass);
}
}
void
VTableBuilder::DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
- uint64_t OffsetInLayoutClass,
+ CharUnits OffsetInLayoutClass,
VisitedVirtualBasesSetTy &VBases) {
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
@@ -1884,8 +1899,8 @@ VTableBuilder::DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
const ASTRecordLayout &LayoutClassLayout =
Context.getASTRecordLayout(LayoutClass);
- uint64_t PrimaryBaseOffsetInLayoutClass =
- LayoutClassLayout.getVBaseClassOffsetInBits(PrimaryBase);
+ CharUnits PrimaryBaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(PrimaryBase);
// We know that the base is not a primary base in the layout class if
// the base offsets are different.
@@ -1904,7 +1919,7 @@ VTableBuilder::DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
- uint64_t BaseOffsetInLayoutClass;
+ CharUnits BaseOffsetInLayoutClass;
if (I->isVirtual()) {
if (!VBases.insert(BaseDecl))
@@ -1914,10 +1929,10 @@ VTableBuilder::DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
Context.getASTRecordLayout(LayoutClass);
BaseOffsetInLayoutClass =
- LayoutClassLayout.getVBaseClassOffsetInBits(BaseDecl);
+ LayoutClassLayout.getVBaseClassOffset(BaseDecl);
} else {
BaseOffsetInLayoutClass =
- OffsetInLayoutClass + Layout.getBaseClassOffsetInBits(BaseDecl);
+ OffsetInLayoutClass + Layout.getBaseClassOffset(BaseDecl);
}
DeterminePrimaryVirtualBases(BaseDecl, BaseOffsetInLayoutClass, VBases);
@@ -1942,18 +1957,19 @@ VTableBuilder::LayoutVTablesForVirtualBases(const CXXRecordDecl *RD,
!PrimaryVirtualBases.count(BaseDecl) && VBases.insert(BaseDecl)) {
const ASTRecordLayout &MostDerivedClassLayout =
Context.getASTRecordLayout(MostDerivedClass);
- uint64_t BaseOffset =
- MostDerivedClassLayout.getVBaseClassOffsetInBits(BaseDecl);
+ CharUnits BaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
const ASTRecordLayout &LayoutClassLayout =
Context.getASTRecordLayout(LayoutClass);
- uint64_t BaseOffsetInLayoutClass =
- LayoutClassLayout.getVBaseClassOffsetInBits(BaseDecl);
-
- LayoutPrimaryAndSecondaryVTables(BaseSubobject(BaseDecl, BaseOffset),
- /*BaseIsMorallyVirtual=*/true,
- /*BaseIsVirtualInLayoutClass=*/true,
- BaseOffsetInLayoutClass);
+ CharUnits BaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+
+ LayoutPrimaryAndSecondaryVTables(
+ BaseSubobject(BaseDecl, BaseOffset),
+ /*BaseIsMorallyVirtual=*/true,
+ /*BaseIsVirtualInLayoutClass=*/true,
+ BaseOffsetInLayoutClass);
}
// We only need to check the base for virtual base vtables if it actually
@@ -1969,8 +1985,7 @@ void VTableBuilder::dumpLayout(llvm::raw_ostream& Out) {
if (isBuildingConstructorVTable()) {
Out << "Construction vtable for ('";
Out << MostDerivedClass->getQualifiedNameAsString() << "', ";
- // FIXME: Don't use / 8 .
- Out << MostDerivedClassOffset / 8 << ") in '";
+ Out << MostDerivedClassOffset.getQuantity() << ") in '";
Out << LayoutClass->getQualifiedNameAsString();
} else {
Out << "Vtable for '";
@@ -2002,15 +2017,21 @@ void VTableBuilder::dumpLayout(llvm::raw_ostream& Out) {
switch (Component.getKind()) {
case VTableComponent::CK_VCallOffset:
- Out << "vcall_offset (" << Component.getVCallOffset() << ")";
+ Out << "vcall_offset ("
+ << Component.getVCallOffset().getQuantity()
+ << ")";
break;
case VTableComponent::CK_VBaseOffset:
- Out << "vbase_offset (" << Component.getVBaseOffset() << ")";
+ Out << "vbase_offset ("
+ << Component.getVBaseOffset().getQuantity()
+ << ")";
break;
case VTableComponent::CK_OffsetToTop:
- Out << "offset_to_top (" << Component.getOffsetToTop() << ")";
+ Out << "offset_to_top ("
+ << Component.getOffsetToTop().getQuantity()
+ << ")";
break;
case VTableComponent::CK_RTTI:
@@ -2116,11 +2137,11 @@ void VTableBuilder::dumpLayout(llvm::raw_ostream& Out) {
const BaseSubobject &Base =
AddressPointsByIndex.find(NextIndex)->second;
- // FIXME: Instead of dividing by 8, we should be using CharUnits.
Out << " -- (" << Base.getBase()->getQualifiedNameAsString();
- Out << ", " << Base.getBaseOffset() / 8 << ") vtable address --\n";
+ Out << ", " << Base.getBaseOffset().getQuantity();
+ Out << ") vtable address --\n";
} else {
- uint64_t BaseOffset =
+ CharUnits BaseOffset =
AddressPointsByIndex.lower_bound(NextIndex)->second.getBaseOffset();
// We store the class names in a set to get a stable order.
@@ -2136,9 +2157,8 @@ void VTableBuilder::dumpLayout(llvm::raw_ostream& Out) {
for (std::set<std::string>::const_iterator I = ClassNames.begin(),
E = ClassNames.end(); I != E; ++I) {
- // FIXME: Instead of dividing by 8, we should be using CharUnits.
Out << " -- (" << *I;
- Out << ", " << BaseOffset / 8 << ") vtable address --\n";
+ Out << ", " << BaseOffset.getQuantity() << ") vtable address --\n";
}
}
}
@@ -2153,12 +2173,13 @@ void VTableBuilder::dumpLayout(llvm::raw_ostream& Out) {
// We store the virtual base class names and their offsets in a map to get
// a stable order.
- std::map<std::string, int64_t> ClassNamesAndOffsets;
+ std::map<std::string, CharUnits> ClassNamesAndOffsets;
for (VBaseOffsetOffsetsMapTy::const_iterator I = VBaseOffsetOffsets.begin(),
E = VBaseOffsetOffsets.end(); I != E; ++I) {
std::string ClassName = I->first->getQualifiedNameAsString();
- int64_t OffsetOffset = I->second;
- ClassNamesAndOffsets.insert(std::make_pair(ClassName, OffsetOffset));
+ CharUnits OffsetOffset = I->second;
+ ClassNamesAndOffsets.insert(
+ std::make_pair(ClassName, OffsetOffset));
}
Out << "Virtual base offset offsets for '";
@@ -2166,10 +2187,10 @@ void VTableBuilder::dumpLayout(llvm::raw_ostream& Out) {
Out << ClassNamesAndOffsets.size();
Out << (ClassNamesAndOffsets.size() == 1 ? " entry" : " entries") << ").\n";
- for (std::map<std::string, int64_t>::const_iterator I =
+ for (std::map<std::string, CharUnits>::const_iterator I =
ClassNamesAndOffsets.begin(), E = ClassNamesAndOffsets.end();
I != E; ++I)
- Out << " " << I->first << " | " << I->second << '\n';
+ Out << " " << I->first << " | " << I->second.getQuantity() << '\n';
Out << "\n";
}
@@ -2233,9 +2254,51 @@ void VTableBuilder::dumpLayout(llvm::raw_ostream& Out) {
}
Out << '\n';
+ }
+ }
+
+ // Compute the vtable indices for all the member functions.
+ // Store them in a map keyed by the index so we'll get a sorted table.
+ std::map<uint64_t, std::string> IndicesMap;
+
+ for (CXXRecordDecl::method_iterator i = MostDerivedClass->method_begin(),
+ e = MostDerivedClass->method_end(); i != e; ++i) {
+ const CXXMethodDecl *MD = *i;
+
+ // We only want virtual member functions.
+ if (!MD->isVirtual())
+ continue;
+
+ std::string MethodName =
+ PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
+ MD);
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ IndicesMap[VTables.getMethodVTableIndex(GlobalDecl(DD, Dtor_Complete))] =
+ MethodName + " [complete]";
+ IndicesMap[VTables.getMethodVTableIndex(GlobalDecl(DD, Dtor_Deleting))] =
+ MethodName + " [deleting]";
+ } else {
+ IndicesMap[VTables.getMethodVTableIndex(MD)] = MethodName;
+ }
+ }
+
+ // Print the vtable indices for all the member functions.
+ if (!IndicesMap.empty()) {
+ Out << "VTable indices for '";
+ Out << MostDerivedClass->getQualifiedNameAsString();
+ Out << "' (" << IndicesMap.size() << " entries).\n";
+ for (std::map<uint64_t, std::string>::const_iterator I = IndicesMap.begin(),
+ E = IndicesMap.end(); I != E; ++I) {
+ uint64_t VTableIndex = I->first;
+ const std::string &MethodName = I->second;
+
+ Out << llvm::format(" %4u | ", VTableIndex) << MethodName << '\n';
}
}
+
+ Out << '\n';
}
}
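With this addition, a -fdump-vtable-layouts run on a small class would end with an index table shaped roughly like the following (illustrative output; the exact method-name rendering comes from PredefinedExpr::ComputeName):

VTable indices for 'C' (3 entries).
   0 | void C::f()
   1 | C::~C() [complete]
   2 | C::~C() [deleting]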
@@ -2243,14 +2306,16 @@ void VTableBuilder::dumpLayout(llvm::raw_ostream& Out) {
static void
CollectPrimaryBases(const CXXRecordDecl *RD, ASTContext &Context,
VTableBuilder::PrimaryBasesSetVectorTy &PrimaryBases) {
- while (RD) {
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
- if (PrimaryBase)
- PrimaryBases.insert(PrimaryBase);
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
- RD = PrimaryBase;
- }
+ if (!PrimaryBase)
+ return;
+
+ CollectPrimaryBases(PrimaryBase, Context, PrimaryBases);
+
+ if (!PrimaryBases.insert(PrimaryBase))
+ assert(false && "Found a duplicate primary base!");
}
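The rewrite trades the old most-derived-first loop for recursion so that the deepest primary base is inserted first, and the SetVector insertion doubles as a duplicate check. A toy model of the shape (Node and primaryBase() are hypothetical stand-ins):

#include "llvm/ADT/SetVector.h"
#include <cassert>

struct Node { const Node *primaryBase() const; };

void collect(const Node *N, llvm::SmallSetVector<const Node *, 8> &Out) {
  const Node *Primary = N->primaryBase();
  if (!Primary)
    return;
  collect(Primary, Out);              // recurse before inserting: bottom-up
  bool Inserted = Out.insert(Primary);
  assert(Inserted && "duplicate primary base");
  (void)Inserted;
}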
void CodeGenVTables::ComputeMethodVTableIndices(const CXXRecordDecl *RD) {
@@ -2410,8 +2475,9 @@ uint64_t CodeGenVTables::getMethodVTableIndex(GlobalDecl GD) {
return I->second;
}
-int64_t CodeGenVTables::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
- const CXXRecordDecl *VBase) {
+CharUnits
+CodeGenVTables::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *VBase) {
ClassPairTy ClassPair(RD, VBase);
VirtualBaseClassOffsetOffsetsMapTy::iterator I =
@@ -2420,9 +2486,9 @@ int64_t CodeGenVTables::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
return I->second;
VCallAndVBaseOffsetBuilder Builder(RD, RD, /*FinalOverriders=*/0,
- BaseSubobject(RD, 0),
+ BaseSubobject(RD, CharUnits::Zero()),
/*BaseIsVirtual=*/false,
- /*OffsetInLayoutClass=*/0);
+ /*OffsetInLayoutClass=*/CharUnits::Zero());
for (VCallAndVBaseOffsetBuilder::VBaseOffsetOffsetsMapTy::const_iterator I =
Builder.getVBaseOffsetOffsets().begin(),
@@ -2430,7 +2496,8 @@ int64_t CodeGenVTables::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
// Insert all types.
ClassPairTy ClassPair(RD, I->first);
- VirtualBaseClassOffsetOffsets.insert(std::make_pair(ClassPair, I->second));
+ VirtualBaseClassOffsetOffsets.insert(
+ std::make_pair(ClassPair, I->second));
}
I = VirtualBaseClassOffsetOffsets.find(ClassPair);
@@ -2531,7 +2598,7 @@ static void setThunkVisibility(CodeGenModule &CGM, const CXXMethodDecl *MD,
Fn->getVisibility() != llvm::GlobalVariable::DefaultVisibility)
return;
- if (MD->hasAttr<VisibilityAttr>())
+ if (MD->getExplicitVisibility())
return;
switch (MD->getTemplateSpecializationKind()) {
@@ -2559,8 +2626,19 @@ static void setThunkVisibility(CodeGenModule &CGM, const CXXMethodDecl *MD,
Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
}
-void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
- const ThunkInfo &Thunk) {
+#ifndef NDEBUG
+static bool similar(const ABIArgInfo &infoL, CanQualType typeL,
+ const ABIArgInfo &infoR, CanQualType typeR) {
+ return (infoL.getKind() == infoR.getKind() &&
+ (typeL == typeR ||
+ (isa<PointerType>(typeL) && isa<PointerType>(typeR)) ||
+ (isa<ReferenceType>(typeL) && isa<ReferenceType>(typeR))));
+}
+#endif
+
+void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo,
+ GlobalDecl GD, const ThunkInfo &Thunk) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
QualType ResultType = FPT->getResultType();
@@ -2580,10 +2658,11 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
E = MD->param_end(); I != E; ++I) {
ParmVarDecl *Param = *I;
- FunctionArgs.push_back(std::make_pair(Param, Param->getType()));
+ FunctionArgs.push_back(Param);
}
- StartFunction(GlobalDecl(), ResultType, Fn, FunctionArgs, SourceLocation());
+ StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs,
+ SourceLocation());
CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
@@ -2596,16 +2675,13 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
CallArgList CallArgs;
// Add our adjusted 'this' pointer.
- CallArgs.push_back(std::make_pair(RValue::get(AdjustedThisPtr), ThisType));
+ CallArgs.add(RValue::get(AdjustedThisPtr), ThisType);
// Add the rest of the parameters.
for (FunctionDecl::param_const_iterator I = MD->param_begin(),
E = MD->param_end(); I != E; ++I) {
- ParmVarDecl *Param = *I;
- QualType ArgType = Param->getType();
- RValue Arg = EmitDelegateCallArg(Param);
-
- CallArgs.push_back(std::make_pair(Arg, ArgType));
+ ParmVarDecl *param = *I;
+ EmitDelegateCallArg(CallArgs, param);
}
// Get our callee.
@@ -2614,9 +2690,20 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
FPT->isVariadic());
llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
- const CGFunctionInfo &FnInfo =
- CGM.getTypes().getFunctionInfo(ResultType, CallArgs,
- FPT->getExtInfo());
+#ifndef NDEBUG
+ const CGFunctionInfo &CallFnInfo =
+ CGM.getTypes().getFunctionInfo(ResultType, CallArgs, FPT->getExtInfo());
+ assert(CallFnInfo.getRegParm() == FnInfo.getRegParm() &&
+ CallFnInfo.isNoReturn() == FnInfo.isNoReturn() &&
+ CallFnInfo.getCallingConvention() == FnInfo.getCallingConvention());
+ assert(similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
+ FnInfo.getReturnInfo(), FnInfo.getReturnType()));
+ assert(CallFnInfo.arg_size() == FnInfo.arg_size());
+ for (unsigned i = 0, e = FnInfo.arg_size(); i != e; ++i)
+ assert(similar(CallFnInfo.arg_begin()[i].info,
+ CallFnInfo.arg_begin()[i].type,
+ FnInfo.arg_begin()[i].info, FnInfo.arg_begin()[i].type));
+#endif
// Determine whether we have a return value slot to use.
ReturnValueSlot Slot;
@@ -2658,8 +2745,7 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
Builder.CreateBr(AdjustEnd);
EmitBlock(AdjustEnd);
- llvm::PHINode *PHI = Builder.CreatePHI(ReturnValue->getType());
- PHI->reserveOperandSpace(2);
+ llvm::PHINode *PHI = Builder.CreatePHI(ReturnValue->getType(), 2);
PHI->addIncoming(ReturnValue, AdjustNotNull);
PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()),
AdjustNull);
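The PHI change tracks an LLVM API update: the incoming-edge count hint moved from a separate reserveOperandSpace() call into CreatePHI itself. In isolation the pattern is (names reused from the code above):

llvm::PHINode *PHI =
    Builder.CreatePHI(ReturnValue->getType(), /*NumReservedValues=*/2);
PHI->addIncoming(ReturnValue, AdjustNotNull);
PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()),
                 AdjustNull);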
@@ -2684,6 +2770,9 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
bool UseAvailableExternallyLinkage)
{
+ const CGFunctionInfo &FnInfo = CGM.getTypes().getFunctionInfo(GD);
+
+ // FIXME: re-use FnInfo in this computation.
llvm::Constant *Entry = CGM.GetAddrOfThunk(GD, Thunk);
// Strip off a bitcast if we got one back.
@@ -2735,7 +2824,7 @@ void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
}
// Actually generate the thunk body.
- CodeGenFunction(CGM).GenerateThunk(ThunkFn, GD, Thunk);
+ CodeGenFunction(CGM).GenerateThunk(ThunkFn, FnInfo, GD, Thunk);
if (UseAvailableExternallyLinkage)
ThunkFn->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
@@ -2796,7 +2885,8 @@ void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD,
if (Entry.getPointer())
return;
- VTableBuilder Builder(*this, RD, 0, /*MostDerivedClassIsVirtual=*/0, RD);
+ VTableBuilder Builder(*this, RD, CharUnits::Zero(),
+ /*MostDerivedClassIsVirtual=*/0, RD);
// Add the VTable layout.
uint64_t NumVTableComponents = Builder.getNumVTableComponents();
@@ -2864,7 +2954,8 @@ void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD,
// Insert all types.
ClassPairTy ClassPair(RD, I->first);
- VirtualBaseClassOffsetOffsets.insert(std::make_pair(ClassPair, I->second));
+ VirtualBaseClassOffsetOffsets.insert(
+ std::make_pair(ClassPair, I->second));
}
}
@@ -2895,15 +2986,18 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
switch (Component.getKind()) {
case VTableComponent::CK_VCallOffset:
- Init = llvm::ConstantInt::get(PtrDiffTy, Component.getVCallOffset());
+ Init = llvm::ConstantInt::get(PtrDiffTy,
+ Component.getVCallOffset().getQuantity());
Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
break;
case VTableComponent::CK_VBaseOffset:
- Init = llvm::ConstantInt::get(PtrDiffTy, Component.getVBaseOffset());
+ Init = llvm::ConstantInt::get(PtrDiffTy,
+ Component.getVBaseOffset().getQuantity());
Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
break;
case VTableComponent::CK_OffsetToTop:
- Init = llvm::ConstantInt::get(PtrDiffTy, Component.getOffsetToTop());
+ Init = llvm::ConstantInt::get(PtrDiffTy,
+ Component.getOffsetToTop().getQuantity());
Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
break;
case VTableComponent::CK_RTTI:
@@ -3001,7 +3095,8 @@ CodeGenVTables::EmitVTableDefinition(llvm::GlobalVariable *VTable,
const CXXRecordDecl *RD) {
// Dump the vtable layout if necessary.
if (CGM.getLangOptions().DumpVTableLayouts) {
- VTableBuilder Builder(*this, RD, 0, /*MostDerivedClassIsVirtual=*/0, RD);
+ VTableBuilder Builder(*this, RD, CharUnits::Zero(),
+ /*MostDerivedClassIsVirtual=*/0, RD);
Builder.dumpLayout(llvm::errs());
}
@@ -3028,8 +3123,10 @@ llvm::GlobalVariable *
CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
const BaseSubobject &Base,
bool BaseIsVirtual,
+ llvm::GlobalVariable::LinkageTypes Linkage,
VTableAddressPointsMapTy& AddressPoints) {
- VTableBuilder Builder(*this, Base.getBase(), Base.getBaseOffset(),
+ VTableBuilder Builder(*this, Base.getBase(),
+ Base.getBaseOffset(),
/*MostDerivedClassIsVirtual=*/BaseIsVirtual, RD);
// Dump the vtable layout if necessary.
@@ -3044,7 +3141,8 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
llvm::SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().
- mangleCXXCtorVTable(RD, Base.getBaseOffset() / 8, Base.getBase(), Out);
+ mangleCXXCtorVTable(RD, Base.getBaseOffset().getQuantity(), Base.getBase(),
+ Out);
Out.flush();
llvm::StringRef Name = OutName.str();
@@ -3054,8 +3152,11 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
// Create the variable that will hold the construction vtable.
llvm::GlobalVariable *VTable =
- CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType,
- llvm::GlobalValue::InternalLinkage);
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType, Linkage);
+ CGM.setTypeVisibility(VTable, RD, CodeGenModule::TVK_ForConstructionVTable);
+
+ // V-tables are always unnamed_addr.
+ VTable->setUnnamedAddr(true);
// Add the thunks.
VTableThunksTy VTableThunks;
diff --git a/lib/CodeGen/CGVTables.h b/lib/CodeGen/CGVTables.h
index 7c119fa..e830e9a 100644
--- a/lib/CodeGen/CGVTables.h
+++ b/lib/CodeGen/CGVTables.h
@@ -1,4 +1,4 @@
-//===--- CGVTables.h - Emit LLVM Code for C++ vtables ---------------------===//
+//===--- CGVTables.h - Emit LLVM Code for C++ vtables -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -17,6 +17,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/GlobalVariable.h"
#include "clang/Basic/ABI.h"
+#include "clang/AST/CharUnits.h"
#include "GlobalDecl.h"
namespace clang {
@@ -33,17 +34,17 @@ class BaseSubobject {
const CXXRecordDecl *Base;
/// BaseOffset - The offset from the most derived class to the base class.
- uint64_t BaseOffset;
+ CharUnits BaseOffset;
public:
- BaseSubobject(const CXXRecordDecl *Base, uint64_t BaseOffset)
+ BaseSubobject(const CXXRecordDecl *Base, CharUnits BaseOffset)
: Base(Base), BaseOffset(BaseOffset) { }
/// getBase - Returns the base class declaration.
const CXXRecordDecl *getBase() const { return Base; }
/// getBaseOffset - Returns the base class offset.
- uint64_t getBaseOffset() const { return BaseOffset; }
+ CharUnits getBaseOffset() const { return BaseOffset; }
friend bool operator==(const BaseSubobject &LHS, const BaseSubobject &RHS) {
return LHS.Base == RHS.Base && LHS.BaseOffset == RHS.BaseOffset;
@@ -59,19 +60,19 @@ template<> struct DenseMapInfo<clang::CodeGen::BaseSubobject> {
static clang::CodeGen::BaseSubobject getEmptyKey() {
return clang::CodeGen::BaseSubobject(
DenseMapInfo<const clang::CXXRecordDecl *>::getEmptyKey(),
- DenseMapInfo<uint64_t>::getEmptyKey());
+ clang::CharUnits::fromQuantity(DenseMapInfo<int64_t>::getEmptyKey()));
}
static clang::CodeGen::BaseSubobject getTombstoneKey() {
return clang::CodeGen::BaseSubobject(
DenseMapInfo<const clang::CXXRecordDecl *>::getTombstoneKey(),
- DenseMapInfo<uint64_t>::getTombstoneKey());
+ clang::CharUnits::fromQuantity(DenseMapInfo<int64_t>::getTombstoneKey()));
}
static unsigned getHashValue(const clang::CodeGen::BaseSubobject &Base) {
return
DenseMapInfo<const clang::CXXRecordDecl *>::getHashValue(Base.getBase()) ^
- DenseMapInfo<uint64_t>::getHashValue(Base.getBaseOffset());
+ DenseMapInfo<int64_t>::getHashValue(Base.getBaseOffset().getQuantity());
}
static bool isEqual(const clang::CodeGen::BaseSubobject &LHS,
@@ -102,9 +103,9 @@ class CodeGenVTables {
const CXXRecordDecl *> ClassPairTy;
/// VirtualBaseClassOffsetOffsets - Contains the vtable offset (relative to
- /// the address point) in bytes where the offsets for virtual bases of a class
+ /// the address point) in chars where the offsets for virtual bases of a class
/// are stored.
- typedef llvm::DenseMap<ClassPairTy, int64_t>
+ typedef llvm::DenseMap<ClassPairTy, CharUnits>
VirtualBaseClassOffsetOffsetsMapTy;
VirtualBaseClassOffsetOffsetsMapTy VirtualBaseClassOffsetOffsets;
@@ -234,13 +235,13 @@ public:
/// stored.
uint64_t getMethodVTableIndex(GlobalDecl GD);
- /// getVirtualBaseOffsetOffset - Return the offset in bytes (relative to the
+ /// getVirtualBaseOffsetOffset - Return the offset in chars (relative to the
/// vtable address point) where the offset of the virtual base that contains
/// the given base is stored, otherwise, if no virtual base contains the given
  /// class, return 0. Base must be a virtual base class or an unambiguous
/// base.
- int64_t getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
- const CXXRecordDecl *VBase);
+ CharUnits getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *VBase);
/// getAddressPoint - Get the address point of the given subobject in the
/// class decl.
@@ -259,6 +260,7 @@ public:
llvm::GlobalVariable *
GenerateConstructionVTable(const CXXRecordDecl *RD, const BaseSubobject &Base,
bool BaseIsVirtual,
+ llvm::GlobalVariable::LinkageTypes Linkage,
VTableAddressPointsMapTy& AddressPoints);
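The DenseMapInfo change above follows the standard recipe for keying a DenseMap on a value type: reserve two impossible values as the empty and tombstone keys and delegate hashing to the underlying integer. A minimal sketch for a CharUnits-like wrapper (Bytes is a hypothetical stand-in, not the clang type):

#include "llvm/ADT/DenseMap.h"
#include <stdint.h>

struct Bytes { int64_t Q; };

namespace llvm {
  template<> struct DenseMapInfo<Bytes> {
    static Bytes make(int64_t q) { Bytes b; b.Q = q; return b; }
    static Bytes getEmptyKey() {
      return make(DenseMapInfo<int64_t>::getEmptyKey());
    }
    static Bytes getTombstoneKey() {
      return make(DenseMapInfo<int64_t>::getTombstoneKey());
    }
    static unsigned getHashValue(const Bytes &B) {
      return DenseMapInfo<int64_t>::getHashValue(B.Q);
    }
    static bool isEqual(const Bytes &L, const Bytes &R) { return L.Q == R.Q; }
  };
}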
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index 8c20f29..80e46d2 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -2,6 +2,7 @@ set(LLVM_LINK_COMPONENTS
asmparser
bitreader
bitwriter
+ instrumentation
ipo
)
@@ -29,6 +30,7 @@ add_clang_library(clangCodeGen
CGObjC.cpp
CGObjCGNU.cpp
CGObjCMac.cpp
+ CGObjCRuntime.cpp
CGRecordLayoutBuilder.cpp
CGRTTI.cpp
CGStmt.cpp
diff --git a/lib/CodeGen/CodeGenAction.cpp b/lib/CodeGen/CodeGenAction.cpp
index a24bbc4..62fa1f9 100644
--- a/lib/CodeGen/CodeGenAction.cpp
+++ b/lib/CodeGen/CodeGenAction.cpp
@@ -216,7 +216,7 @@ void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D,
return;
}
- // Otherwise, report the backend error as occuring in the generated .s file.
+ // Otherwise, report the backend error as occurring in the generated .s file.
// If Loc is invalid, we still need to report the error, it just gets no
// location info.
Diags.Report(Loc, diag::err_fe_inline_asm).AddString(Message);
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index f1b7286..626c2b0 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -33,7 +33,7 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
Target(CGM.getContext().Target), Builder(cgm.getModule().getContext()),
BlockInfo(0), BlockPointer(0),
NormalCleanupDest(0), EHCleanupDest(0), NextCleanupDestIndex(1),
- ExceptionSlot(0), DebugInfo(0), IndirectBranch(0),
+ ExceptionSlot(0), DebugInfo(0), DisableDebugInfo(false), IndirectBranch(0),
SwitchInsn(0), CaseRangeBlock(0),
DidCallStackSave(false), UnreachableBlock(0),
CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
@@ -210,6 +210,7 @@ void CodeGenFunction::EmitMCountInstrumentation() {
void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo,
const FunctionArgList &Args,
SourceLocation StartLoc) {
const Decl *D = GD.getDecl();
@@ -218,6 +219,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
CurCodeDecl = CurFuncDecl = D;
FnRetTy = RetTy;
CurFn = Fn;
+ CurFnInfo = &FnInfo;
assert(CurFn->isDeclaration() && "Function already has body?");
// Pass inline keyword to optimizer if it appears explicitly on any
@@ -239,7 +241,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
llvm::Value *Op = Fn;
- OpenCLMetadata->addOperand(llvm::MDNode::get(Context, &Op, 1));
+ OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Op));
}
}
@@ -275,11 +277,6 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
if (CGM.getCodeGenOpts().InstrumentForProfiling)
EmitMCountInstrumentation();
- // FIXME: Leaked.
- // CC info is ignored, hopefully?
- CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args,
- FunctionType::ExtInfo());
-
if (RetTy->isVoidType()) {
// Void type; nothing to return.
ReturnValue = 0;
@@ -302,7 +299,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// emit the type size.
for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i) {
- QualType Ty = i->second;
+ QualType Ty = (*i)->getType();
if (Ty->isVariablyModifiedType())
EmitVLASize(Ty);
@@ -332,12 +329,13 @@ static void TryMarkNoThrow(llvm::Function *F) {
F->setDoesNotThrow(true);
}
-void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
+void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo) {
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
// Check if we should generate debug info for this function.
- if (CGM.getDebugInfo() && !FD->hasAttr<NoDebugAttr>())
- DebugInfo = CGM.getDebugInfo();
+ if (CGM.getModuleDebugInfo() && !FD->hasAttr<NoDebugAttr>())
+ DebugInfo = CGM.getModuleDebugInfo();
FunctionArgList Args;
QualType ResTy = FD->getResultType();
@@ -346,20 +344,15 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
if (isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isInstance())
CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResTy, Args);
- if (FD->getNumParams()) {
- const FunctionProtoType* FProto = FD->getType()->getAs<FunctionProtoType>();
- assert(FProto && "Function def must have prototype!");
-
+ if (FD->getNumParams())
for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
- Args.push_back(std::make_pair(FD->getParamDecl(i),
- FProto->getArgType(i)));
- }
+ Args.push_back(FD->getParamDecl(i));
SourceRange BodyRange;
if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
// Emit the standard function prologue.
- StartFunction(GD, ResTy, Fn, Args, BodyRange.getBegin());
+ StartFunction(GD, ResTy, Fn, FnInfo, Args, BodyRange.getBegin());
// Generate the body of the function.
if (isa<CXXDestructorDecl>(FD))
@@ -387,9 +380,12 @@ bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
// If this is a label, we have to emit the code, consider something like:
// if (0) { ... foo: bar(); } goto foo;
+ //
+ // TODO: If anyone cared, we could track __label__'s, since we know that you
+ // can't jump to one from outside their declared region.
if (isa<LabelStmt>(S))
return true;
-
+
// If this is a case/default statement, and we haven't seen a switch, we have
// to emit the code.
if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
@@ -407,26 +403,65 @@ bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
return false;
}
+/// containsBreak - Return true if the statement contains a break out of it.
+/// If the statement (recursively) contains a switch or loop with a break
+/// inside of it, this is fine.
+bool CodeGenFunction::containsBreak(const Stmt *S) {
+ // Null statement, not a label!
+ if (S == 0) return false;
+
+ // If this is a switch or loop that defines its own break scope, then we can
+ // include it and anything inside of it.
+ if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
+ isa<ForStmt>(S))
+ return false;
+
+ if (isa<BreakStmt>(S))
+ return true;
+
+ // Scan subexpressions for verboten breaks.
+ for (Stmt::const_child_range I = S->children(); I; ++I)
+ if (containsBreak(*I))
+ return true;
+
+ return false;
+}
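For example, given the body of the do-statement below, containsBreak finds only the second break; the first is owned by the switch and never escapes it (hypothetical snippet):

do {
  switch (x) {
  case 1: break;   // scoped to the switch: the recursion stops at SwitchStmt
  }
  if (cond)
    break;         // escapes the do-statement: containsBreak returns true
} while (0);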
+
+
+/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
+/// to a constant, or if it does but contains a label, return false. If it
+/// constant folds, return true and set the boolean result in Result.
+bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
+ bool &ResultBool) {
+ llvm::APInt ResultInt;
+ if (!ConstantFoldsToSimpleInteger(Cond, ResultInt))
+ return false;
+
+ ResultBool = ResultInt.getBoolValue();
+ return true;
+}
-/// ConstantFoldsToSimpleInteger - If the sepcified expression does not fold to
-/// a constant, or if it does but contains a label, return 0. If it constant
-/// folds to 'true' and does not contain a label, return 1, if it constant folds
-/// to 'false' and does not contain a label, return -1.
-int CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond) {
+/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
+/// to a constant, or if it does but contains a label, return false. If it
+/// constant folds, return true and set the folded value.
+bool CodeGenFunction::
+ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APInt &ResultInt) {
// FIXME: Rename and handle conversion of other evaluatable things
// to bool.
Expr::EvalResult Result;
if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
Result.HasSideEffects)
- return 0; // Not foldable, not integer or not fully evaluatable.
-
+ return false; // Not foldable, not integer or not fully evaluatable.
+
if (CodeGenFunction::ContainsLabel(Cond))
- return 0; // Contains a label.
-
- return Result.Val.getInt().getBoolValue() ? 1 : -1;
+ return false; // Contains a label.
+
+ ResultInt = Result.Val.getInt();
+ return true;
}
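Caller side, the bool overload reads like this (CondExpr is a hypothetical Expr* used for illustration):

bool CondVal = false;
if (ConstantFoldsToSimpleInteger(CondExpr, CondVal)) {
  // CondExpr folded to a label-free integer constant; CondVal holds its
  // truth value, so only one successor block needs to be emitted.
}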
+
/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks. Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
@@ -434,22 +469,24 @@ int CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond) {
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
llvm::BasicBlock *TrueBlock,
llvm::BasicBlock *FalseBlock) {
- if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond))
- return EmitBranchOnBoolExpr(PE->getSubExpr(), TrueBlock, FalseBlock);
+ Cond = Cond->IgnoreParens();
if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
// Handle X && Y in a condition.
if (CondBOp->getOpcode() == BO_LAnd) {
// If we have "1 && X", simplify the code. "0 && X" would have constant
// folded if the case was simple enough.
- if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == 1) {
+ bool ConstantBool = false;
+ if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
+ ConstantBool) {
// br(1 && X) -> br(X).
return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
}
// If we have "X && 1", simplify the code to use an uncond branch.
// "X && 0" would have been constant folded to 0.
- if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == 1) {
+ if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
+ ConstantBool) {
// br(X && 1) -> br(X).
return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
}
@@ -468,17 +505,22 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
eval.end(*this);
return;
- } else if (CondBOp->getOpcode() == BO_LOr) {
+ }
+
+ if (CondBOp->getOpcode() == BO_LOr) {
// If we have "0 || X", simplify the code. "1 || X" would have constant
// folded if the case was simple enough.
- if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == -1) {
+ bool ConstantBool = false;
+ if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
+ !ConstantBool) {
// br(0 || X) -> br(X).
return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
}
// If we have "X || 0", simplify the code to use an uncond branch.
// "X || 1" would have been constant folded to 1.
- if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == -1) {
+ if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
+ !ConstantBool) {
// br(X || 0) -> br(X).
return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
}
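Illustrative C inputs hitting each of the four rewrites above:

if (1 && f()) { }   /* br(1 && X) -> br(X) */
if (f() && 1) { }   /* br(X && 1) -> br(X) */
if (0 || g()) { }   /* br(0 || X) -> br(X) */
if (g() || 0) { }   /* br(X || 0) -> br(X) */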
@@ -575,8 +617,7 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
// count must be nonzero.
CGF.EmitBlock(loopBB);
- llvm::PHINode *cur = Builder.CreatePHI(i8p, "vla.cur");
- cur->reserveOperandSpace(2);
+ llvm::PHINode *cur = Builder.CreatePHI(i8p, 2, "vla.cur");
cur->addIncoming(begin, originBB);
// memcpy the individual element bit-pattern.
@@ -613,15 +654,16 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
// Get size and alignment info for this aggregate.
- std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
- uint64_t Size = TypeInfo.first / 8;
- unsigned Align = TypeInfo.second / 8;
+ std::pair<CharUnits, CharUnits> TypeInfo =
+ getContext().getTypeInfoInChars(Ty);
+ CharUnits Size = TypeInfo.first;
+ CharUnits Align = TypeInfo.second;
llvm::Value *SizeVal;
const VariableArrayType *vla;
// Don't bother emitting a zero-byte memset.
- if (Size == 0) {
+ if (Size.isZero()) {
// But note that getTypeInfo returns 0 for a VLA.
if (const VariableArrayType *vlaType =
dyn_cast_or_null<VariableArrayType>(
@@ -632,7 +674,7 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
return;
}
} else {
- SizeVal = llvm::ConstantInt::get(IntPtrTy, Size);
+ SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
vla = 0;
}
@@ -657,14 +699,15 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
// Get and call the appropriate llvm.memcpy overload.
- Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align, false);
+ Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity(), false);
return;
}
// Otherwise, just memset the whole thing to zero. This is legal
// because in LLVM, all default initializers (other than the ones we just
// handled above) are guaranteed to have a bit pattern of all zeros.
- Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, Align, false);
+ Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal,
+ Align.getQuantity(), false);
}
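Condensed, the fixed-size (non-VLA) path now reads as below; everything stays in CharUnits until the LLVM calls need raw integers (a sketch of the code above, not a new API):

std::pair<clang::CharUnits, clang::CharUnits> TypeInfo =
    getContext().getTypeInfoInChars(Ty);              // {size, alignment}
if (!TypeInfo.first.isZero()) {
  llvm::Value *SizeVal =
      llvm::ConstantInt::get(IntPtrTy, TypeInfo.first.getQuantity());
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal,
                       TypeInfo.second.getQuantity(), /*isVolatile=*/false);
}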
llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
@@ -686,7 +729,8 @@ llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));
// Create the PHI node that indirect gotos will add entries to.
- llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, "indirect.goto.dest");
+ llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
+ "indirect.goto.dest");
// Create the indirect branch instruction.
IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index be646fb..169c576 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -25,7 +25,6 @@
#include "llvm/Support/ValueHandle.h"
#include "CodeGenModule.h"
#include "CGBuilder.h"
-#include "CGCall.h"
#include "CGValue.h"
namespace llvm {
@@ -43,6 +42,7 @@ namespace clang {
class APValue;
class ASTContext;
class CXXDestructorDecl;
+ class CXXForRangeStmt;
class CXXTryStmt;
class Decl;
class LabelDecl;
@@ -769,6 +769,11 @@ public:
/// block through the normal cleanup handling code (if any) and then
/// on to \arg Dest.
void EmitBranchThroughCleanup(JumpDest Dest);
+
+ /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
+ /// specified destination obviously has no cleanups to run. 'false' is always
+ /// a conservatively correct answer for this method.
+ bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;
/// EmitBranchThroughEHCleanup - Emit a branch from the current
/// insert block through the EH cleanup handling code (if any) and
@@ -944,6 +949,7 @@ public:
const VarDecl *V);
private:
CGDebugInfo *DebugInfo;
+ bool DisableDebugInfo;
/// IndirectBranch - The first time an indirect goto is seen we create a block
/// with an indirect branch. Every time we see the address of a label taken,
@@ -1030,7 +1036,14 @@ public:
CodeGenTypes &getTypes() const { return CGM.getTypes(); }
ASTContext &getContext() const;
- CGDebugInfo *getDebugInfo() { return DebugInfo; }
+ CGDebugInfo *getDebugInfo() {
+ if (DisableDebugInfo)
+ return NULL;
+ return DebugInfo;
+ }
+ void disableDebugInfo() { DisableDebugInfo = true; }
+ void enableDebugInfo() { DisableDebugInfo = false; }
+
const LangOptions &getLangOptions() const { return CGM.getLangOptions(); }
@@ -1100,15 +1113,13 @@ public:
llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
- llvm::Constant *GeneratebyrefCopyHelperFunction(const llvm::Type *,
- BlockFieldFlags flags,
- const VarDecl *BD);
- llvm::Constant *GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
- BlockFieldFlags flags,
- const VarDecl *BD);
-
void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);
+ class AutoVarEmission;
+
+ void emitByrefStructureInit(const AutoVarEmission &emission);
+ void enterByrefCleanup(const AutoVarEmission &emission);
+
llvm::Value *LoadBlockStruct() {
assert(BlockPointer && "no block pointer set!");
return BlockPointer;
@@ -1122,9 +1133,11 @@ public:
llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
const llvm::Type *BuildByRefType(const VarDecl *var);
- void GenerateCode(GlobalDecl GD, llvm::Function *Fn);
+ void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo);
void StartFunction(GlobalDecl GD, QualType RetTy,
llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo,
const FunctionArgList &Args,
SourceLocation StartLoc);
@@ -1141,7 +1154,8 @@ public:
void FinishFunction(SourceLocation EndLoc=SourceLocation());
/// GenerateThunk - Generate a thunk for the given method.
- void GenerateThunk(llvm::Function *Fn, GlobalDecl GD, const ThunkInfo &Thunk);
+ void GenerateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
+ GlobalDecl GD, const ThunkInfo &Thunk);
void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
FunctionArgList &Args);
@@ -1151,14 +1165,14 @@ public:
///
void InitializeVTablePointer(BaseSubobject Base,
const CXXRecordDecl *NearestVBase,
- uint64_t OffsetFromNearestVBase,
+ CharUnits OffsetFromNearestVBase,
llvm::Constant *VTable,
const CXXRecordDecl *VTableClass);
typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
void InitializeVTablePointers(BaseSubobject Base,
const CXXRecordDecl *NearestVBase,
- uint64_t OffsetFromNearestVBase,
+ CharUnits OffsetFromNearestVBase,
bool BaseIsNonVirtualPrimaryBase,
llvm::Constant *VTable,
const CXXRecordDecl *VTableClass,
@@ -1353,12 +1367,18 @@ public:
/// always be accessible even if no aggregate location is provided.
RValue EmitAnyExprToTemp(const Expr *E);
- /// EmitsAnyExprToMem - Emits the code necessary to evaluate an
+ /// EmitAnyExprToMem - Emits the code necessary to evaluate an
/// arbitrary expression into the given memory location.
void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
bool IsLocationVolatile,
bool IsInitializer);
+ /// EmitExprAsInit - Emits the code necessary to initialize a
+ /// location in memory with the given initializer.
+ void EmitExprAsInit(const Expr *init, const VarDecl *var,
+ llvm::Value *loc, CharUnits alignment,
+ bool capturedByInit);
+
/// EmitAggregateCopy - Emit an aggrate copy.
///
/// \param isVolatile - True iff either the source or the destination is
@@ -1476,6 +1496,12 @@ public:
void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
CXXCtorType CtorType,
const FunctionArgList &Args);
+ // It's important not to confuse this and the previous function. Delegating
+ // constructors are the C++0x feature. The constructor delegate optimization
+  // is used to reduce duplication in the base and complete constructors where
+ // they are substantially the same.
+ void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
+ const FunctionArgList &Args);
void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
bool ForVirtualBase, llvm::Value *This,
CallExpr::const_arg_iterator ArgBeg,
@@ -1609,7 +1635,7 @@ public:
llvm::GlobalValue::LinkageTypes Linkage);
/// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
- void EmitParmDecl(const VarDecl &D, llvm::Value *Arg);
+ void EmitParmDecl(const VarDecl &D, llvm::Value *Arg, unsigned ArgNo);
/// protectFromPeepholes - Protect a value that we're intending to
/// store to the side, but which will probably be used later, from
@@ -1680,6 +1706,7 @@ public:
void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
void EmitCXXTryStmt(const CXXTryStmt &S);
+ void EmitCXXForRangeStmt(const CXXForRangeStmt &S);
//===--------------------------------------------------------------------===//
// LValue Expression Emission
@@ -1775,7 +1802,7 @@ public:
LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
- // Note: only availabe for agg return types
+ // Note: only available for agg return types
LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
// Note: only available for agg return types
@@ -2022,7 +2049,8 @@ public:
const std::vector<std::pair<llvm::WeakVH,
llvm::Constant*> > &DtorsAndObjects);
- void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D,
+ void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
+ const VarDecl *D,
llvm::GlobalVariable *Addr);
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
@@ -2044,12 +2072,21 @@ public:
/// that we can just remove the code.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
+ /// containsBreak - Return true if the statement contains a break out of it.
+ /// If the statement (recursively) contains a switch or loop with a break
+ /// inside of it, this is fine.
+ static bool containsBreak(const Stmt *S);
+
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
- /// to a constant, or if it does but contains a label, return 0. If it
- /// constant folds to 'true' and does not contain a label, return 1, if it
- /// constant folds to 'false' and does not contain a label, return -1.
- int ConstantFoldsToSimpleInteger(const Expr *Cond);
+ /// to a constant, or if it does but contains a label, return false. If it
+  /// constant folds, return true and set the boolean result in Result.
+ bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result);
+ /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
+ /// to a constant, or if it does but contains a label, return false. If it
+  /// constant folds, return true and set the folded value in Result.
+ bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APInt &Result);
+
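
A hedged call-site sketch of the reworked folding interface; the declarations are the ones above, but the surrounding control flow is invented:

    bool CondValue;
    if (ConstantFoldsToSimpleInteger(Cond, CondValue)) {
      // Cond folded to a constant and contains no labels: emit only the
      // CondValue arm of the conditional.
    } else {
      // Not foldable: emit a real branch (e.g. via EmitBranchOnBoolExpr).
    }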
/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
/// if statement) to the specified blocks. Based on the condition, this might
/// try to simplify the codegen of the conditional based on the branch.
@@ -2061,12 +2098,12 @@ public:
llvm::BasicBlock *getTrapBB();
/// EmitCallArg - Emit a single call argument.
- RValue EmitCallArg(const Expr *E, QualType ArgType);
+ void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
/// EmitDelegateCallArg - We are performing a delegate call; that
/// is, the current function is delegating to another one. Produce
  /// an r-value suitable for passing the given parameter.
- RValue EmitDelegateCallArg(const VarDecl *Param);
+ void EmitDelegateCallArg(CallArgList &args, const VarDecl *param);
private:
void EmitReturnOfRValue(RValue RV, QualType Ty);
@@ -2131,8 +2168,7 @@ private:
getContext().getCanonicalType(ActualArgType).getTypePtr() &&
"type mismatch in call argument!");
#endif
- Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
- ArgType));
+ EmitCallArg(Args, *Arg, ArgType);
}
// Either we've emitted all the call args, or we have a call to a
@@ -2143,11 +2179,8 @@ private:
}
// If we still have any arguments, emit them using the type of the argument.
- for (; Arg != ArgEnd; ++Arg) {
- QualType ArgType = Arg->getType();
- Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
- ArgType));
- }
+ for (; Arg != ArgEnd; ++Arg)
+ EmitCallArg(Args, *Arg, Arg->getType());
}
const TargetCodeGenInfo &getTargetHooks() const {
@@ -2155,6 +2188,10 @@ private:
}
void EmitDeclMetadata();
+
+ CodeGenModule::ByrefHelpers *
+ buildByrefHelpers(const llvm::StructType &byrefType,
+ const AutoVarEmission &emission);
};
/// Helper class with most of the code for saving a value for a
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index a8453c3..83e927f 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -64,7 +64,7 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
ABI(createCXXABI(*this)),
Types(C, M, TD, getTargetCodeGenInfo().getABIInfo(), ABI),
TBAA(0),
- VTables(*this), Runtime(0),
+ VTables(*this), Runtime(0), DebugInfo(0),
CFConstantStringClassRef(0), ConstantStringClassRef(0),
VMContext(M.getContext()),
NSConcreteGlobalBlockDecl(0), NSConcreteStackBlockDecl(0),
@@ -72,22 +72,19 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
BlockObjectAssignDecl(0), BlockObjectDisposeDecl(0),
BlockObjectAssign(0), BlockObjectDispose(0),
BlockDescriptorType(0), GenericBlockLiteralType(0) {
- if (!Features.ObjC1)
- Runtime = 0;
- else if (!Features.NeXTRuntime)
- Runtime = CreateGNUObjCRuntime(*this);
- else if (Features.ObjCNonFragileABI)
- Runtime = CreateMacNonFragileABIObjCRuntime(*this);
- else
- Runtime = CreateMacObjCRuntime(*this);
+ if (Features.ObjC1)
+ createObjCRuntime();
// Enable TBAA unless it's suppressed.
if (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0)
TBAA = new CodeGenTBAA(Context, VMContext, getLangOptions(),
ABI.getMangleContext());
- // If debug info generation is enabled, create the CGDebugInfo object.
- DebugInfo = CodeGenOpts.DebugInfo ? new CGDebugInfo(*this) : 0;
+ // If debug info or coverage generation is enabled, create the CGDebugInfo
+ // object.
+ if (CodeGenOpts.DebugInfo || CodeGenOpts.EmitGcovArcs ||
+ CodeGenOpts.EmitGcovNotes)
+ DebugInfo = new CGDebugInfo(*this);
Block.GlobalUniqueCount = 0;
@@ -115,8 +112,6 @@ CodeGenModule::~CodeGenModule() {
void CodeGenModule::createObjCRuntime() {
if (!Features.NeXTRuntime)
Runtime = CreateGNUObjCRuntime(*this);
- else if (Features.ObjCNonFragileABI)
- Runtime = CreateMacNonFragileABIObjCRuntime(*this);
else
Runtime = CreateMacObjCRuntime(*this);
}
@@ -139,6 +134,13 @@ void CodeGenModule::Release() {
EmitDeclMetadata();
}
+void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
+ // Make sure that this type is translated.
+ Types.UpdateCompletedType(TD);
+ if (DebugInfo)
+ DebugInfo->UpdateCompletedType(TD);
+}
+
llvm::MDNode *CodeGenModule::getTBAAInfo(QualType QTy) {
if (!TBAA)
return 0;
@@ -151,7 +153,12 @@ void CodeGenModule::DecorateInstruction(llvm::Instruction *Inst,
}
bool CodeGenModule::isTargetDarwin() const {
- return getContext().Target.getTriple().getOS() == llvm::Triple::Darwin;
+ return getContext().Target.getTriple().isOSDarwin();
+}
+
+void CodeGenModule::Error(SourceLocation loc, llvm::StringRef error) {
+ unsigned diagID = getDiags().getCustomDiagID(Diagnostic::Error, error);
+ getDiags().Report(Context.getFullLoc(loc), diagID);
}
/// ErrorUnsupported - Print out an error that codegen doesn't support the
@@ -220,7 +227,7 @@ void CodeGenModule::setTypeVisibility(llvm::GlobalValue *GV,
return;
// Don't override an explicit visibility attribute.
- if (RD->hasAttr<VisibilityAttr>())
+ if (RD->getExplicitVisibility())
return;
switch (RD->getTemplateSpecializationKind()) {
@@ -501,6 +508,13 @@ void CodeGenModule::SetInternalFunctionAttributes(const Decl *D,
void CodeGenModule::SetFunctionAttributes(GlobalDecl GD,
llvm::Function *F,
bool IsIncompleteFunction) {
+ if (unsigned IID = F->getIntrinsicID()) {
+ // If this is an intrinsic function, set the function's attributes
+ // to the intrinsic's attributes.
+ F->setAttributes(llvm::Intrinsic::getAttributes((llvm::Intrinsic::ID)IID));
+ return;
+ }
+
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
if (!IsIncompleteFunction)
@@ -512,7 +526,7 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD,
if (FD->hasAttr<DLLImportAttr>()) {
F->setLinkage(llvm::Function::DLLImportLinkage);
} else if (FD->hasAttr<WeakAttr>() ||
- FD->hasAttr<WeakImportAttr>()) {
+ FD->isWeakImported()) {
// "extern_weak" is overloaded in LLVM; we probably should have
// separate linkage types for this.
F->setLinkage(llvm::Function::ExternalWeakLinkage);
@@ -989,7 +1003,7 @@ CodeGenModule::GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
} else {
if (D->hasAttr<DLLImportAttr>())
GV->setLinkage(llvm::GlobalValue::DLLImportLinkage);
- else if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakImportAttr>())
+ else if (D->hasAttr<WeakAttr>() || D->isWeakImported())
GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
// Set visibility on a declaration only if it's explicit.
@@ -1055,7 +1069,7 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
Ty = getTypes().ConvertTypeForMem(ASTTy);
const llvm::PointerType *PTy =
- llvm::PointerType::get(Ty, ASTTy.getAddressSpace());
+ llvm::PointerType::get(Ty, getContext().getTargetAddressSpace(ASTTy));
llvm::StringRef MangledName = getMangledName(D);
return GetOrCreateLLVMGlobal(MangledName, PTy, D);
@@ -1066,7 +1080,7 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
llvm::Constant *
CodeGenModule::CreateRuntimeVariable(const llvm::Type *Ty,
llvm::StringRef Name) {
- return GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), 0,
+ return GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), 0,
true);
}
@@ -1234,7 +1248,8 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
// from the type of the global (this happens with unions).
if (GV == 0 ||
GV->getType()->getElementType() != InitType ||
- GV->getType()->getAddressSpace() != ASTTy.getAddressSpace()) {
+ GV->getType()->getAddressSpace() !=
+ getContext().getTargetAddressSpace(ASTTy)) {
// Move the old entry aside so that we'll create a new one.
Entry->setName(llvm::StringRef());
@@ -1281,7 +1296,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
EmitCXXGlobalVarDeclInitFunc(D, GV);
// Emit global variable debug information.
- if (CGDebugInfo *DI = getDebugInfo()) {
+ if (CGDebugInfo *DI = getModuleDebugInfo()) {
DI->setLocation(D->getLocation());
DI->EmitGlobalVariable(GV, D);
}
@@ -1304,9 +1319,7 @@ CodeGenModule::GetLLVMLinkageVarDefinition(const VarDecl *D,
return llvm::GlobalVariable::WeakAnyLinkage;
} else if (Linkage == GVA_TemplateInstantiation ||
Linkage == GVA_ExplicitTemplateInstantiation)
- // FIXME: It seems like we can provide more specific linkage here
- // (LinkOnceODR, WeakODR).
- return llvm::GlobalVariable::WeakAnyLinkage;
+ return llvm::GlobalVariable::WeakODRLinkage;
else if (!getLangOptions().CPlusPlus &&
((!CodeGenOpts.NoCommon && !D->getAttr<NoCommonAttr>()) ||
D->getAttr<CommonAttr>()) &&
@@ -1391,7 +1404,14 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
const FunctionDecl *D = cast<FunctionDecl>(GD.getDecl());
- const llvm::FunctionType *Ty = getTypes().GetFunctionType(GD);
+
+ // Compute the function info and LLVM type.
+ const CGFunctionInfo &FI = getTypes().getFunctionInfo(GD);
+ bool variadic = false;
+ if (const FunctionProtoType *fpt = D->getType()->getAs<FunctionProtoType>())
+ variadic = fpt->isVariadic();
+ const llvm::FunctionType *Ty = getTypes().GetFunctionType(FI, variadic, false);
+
// Get or create the prototype for the function.
llvm::Constant *Entry = GetAddrOfFunction(GD, Ty);
@@ -1451,7 +1471,7 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
// FIXME: this is redundant with part of SetFunctionDefinitionAttributes
setGlobalVisibility(Fn, D);
- CodeGenFunction(*this).GenerateCode(D, Fn);
+ CodeGenFunction(*this).GenerateCode(D, Fn, FI);
SetFunctionDefinitionAttributes(D, Fn);
SetLLVMFunctionAttributesForDefinition(D, Fn);
@@ -1525,7 +1545,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
}
} else if (D->hasAttr<WeakAttr>() ||
D->hasAttr<WeakRefAttr>() ||
- D->hasAttr<WeakImportAttr>()) {
+ D->isWeakImported()) {
GA->setLinkage(llvm::Function::WeakAnyLinkage);
}
@@ -1662,7 +1682,10 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
// does make plain ascii ones writable.
isConstant = true;
} else {
- Linkage = llvm::GlobalValue::PrivateLinkage;
+ // FIXME: With OS X ld 123.2 (xcode 4) and LTO we would get a linker error
+ // when using private linkage. It is not clear if this is a bug in ld
+ // or a reasonable new restriction.
+ Linkage = llvm::GlobalValue::LinkerPrivateLinkage;
isConstant = !Features.WritableStrings;
}
@@ -1673,6 +1696,9 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
if (isUTF16) {
CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy);
GV->setAlignment(Align.getQuantity());
+ } else {
+ CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy);
+ GV->setAlignment(Align.getQuantity());
}
Fields[2] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
@@ -1765,6 +1791,9 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
if (isUTF16) {
CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy);
GV->setAlignment(Align.getQuantity());
+ } else {
+ CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy);
+ GV->setAlignment(Align.getQuantity());
}
Fields[1] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
@@ -1835,7 +1864,7 @@ CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
/// GenerateWritableString -- Creates storage for a string literal.
-static llvm::Constant *GenerateStringLiteral(const std::string &str,
+static llvm::Constant *GenerateStringLiteral(llvm::StringRef str,
bool constant,
CodeGenModule &CGM,
const char *GlobalName) {
@@ -1860,7 +1889,7 @@ static llvm::Constant *GenerateStringLiteral(const std::string &str,
/// Feature.WriteableStrings.
///
/// The result has pointer to array type.
-llvm::Constant *CodeGenModule::GetAddrOfConstantString(const std::string &str,
+llvm::Constant *CodeGenModule::GetAddrOfConstantString(llvm::StringRef Str,
const char *GlobalName) {
bool IsConstant = !Features.WritableStrings;
@@ -1870,26 +1899,27 @@ llvm::Constant *CodeGenModule::GetAddrOfConstantString(const std::string &str,
// Don't share any string literals if strings aren't constant.
if (!IsConstant)
- return GenerateStringLiteral(str, false, *this, GlobalName);
+ return GenerateStringLiteral(Str, false, *this, GlobalName);
llvm::StringMapEntry<llvm::Constant *> &Entry =
- ConstantStringMap.GetOrCreateValue(&str[0], &str[str.length()]);
+ ConstantStringMap.GetOrCreateValue(Str);
if (Entry.getValue())
return Entry.getValue();
// Create a global variable for this.
- llvm::Constant *C = GenerateStringLiteral(str, true, *this, GlobalName);
+ llvm::Constant *C = GenerateStringLiteral(Str, true, *this, GlobalName);
Entry.setValue(C);
return C;
}
/// GetAddrOfConstantCString - Returns a pointer to a character
-/// array containing the literal and a terminating '\-'
+/// array containing the literal and a terminating '\0'
/// character. The result has pointer to array type.
-llvm::Constant *CodeGenModule::GetAddrOfConstantCString(const std::string &str,
+llvm::Constant *CodeGenModule::GetAddrOfConstantCString(const std::string &Str,
const char *GlobalName){
- return GetAddrOfConstantString(str + '\0', GlobalName);
+ llvm::StringRef StrWithNull(Str.c_str(), Str.size() + 1);
+ return GetAddrOfConstantString(StrWithNull, GlobalName);
}
/// EmitObjCPropertyImplementations - Emit information for synthesized
@@ -1920,37 +1950,48 @@ void CodeGenModule::EmitObjCPropertyImplementations(const
}
}
+static bool needsDestructMethod(ObjCImplementationDecl *impl) {
+ ObjCInterfaceDecl *iface
+ = const_cast<ObjCInterfaceDecl*>(impl->getClassInterface());
+ for (ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
+ ivar; ivar = ivar->getNextIvar())
+ if (ivar->getType().isDestructedType())
+ return true;
+
+ return false;
+}
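
A hedged Objective-C++ sketch of what this predicate detects (Logger and Host are invented; compile as Objective-C++ with an NSObject root class available):

    struct Logger { ~Logger(); };     // a type with a nontrivial destructor
    @interface Host : NSObject {
      Logger log_;                    // ivar->getType().isDestructedType() is
    }                                 // true, so a .cxx_destruct method is
    @end                              // synthesized even though there are no
                                      // ivar initializers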
+
/// EmitObjCIvarInitializations - Emit information for ivar initialization
/// for an implementation.
void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
- if (!Features.NeXTRuntime || D->getNumIvarInitializers() == 0)
+ // We might need a .cxx_destruct even if we don't have any ivar initializers.
+ if (needsDestructMethod(D)) {
+ IdentifierInfo *II = &getContext().Idents.get(".cxx_destruct");
+ Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
+ ObjCMethodDecl *DTORMethod =
+ ObjCMethodDecl::Create(getContext(), D->getLocation(), D->getLocation(),
+ cxxSelector, getContext().VoidTy, 0, D, true,
+ false, true, false, ObjCMethodDecl::Required);
+ D->addInstanceMethod(DTORMethod);
+ CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false);
+ }
+
+ // If the implementation doesn't have any ivar initializers, we don't need
+ // a .cxx_construct.
+ if (D->getNumIvarInitializers() == 0)
return;
- DeclContext* DC = const_cast<DeclContext*>(dyn_cast<DeclContext>(D));
- assert(DC && "EmitObjCIvarInitializations - null DeclContext");
- IdentifierInfo *II = &getContext().Idents.get(".cxx_destruct");
- Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
- ObjCMethodDecl *DTORMethod = ObjCMethodDecl::Create(getContext(),
- D->getLocation(),
- D->getLocation(), cxxSelector,
- getContext().VoidTy, 0,
- DC, true, false, true, false,
- ObjCMethodDecl::Required);
- D->addInstanceMethod(DTORMethod);
- CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false);
- II = &getContext().Idents.get(".cxx_construct");
- cxxSelector = getContext().Selectors.getSelector(0, &II);
+ IdentifierInfo *II = &getContext().Idents.get(".cxx_construct");
+ Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
// The constructor returns 'self'.
ObjCMethodDecl *CTORMethod = ObjCMethodDecl::Create(getContext(),
D->getLocation(),
D->getLocation(), cxxSelector,
getContext().getObjCIdType(), 0,
- DC, true, false, true, false,
+ D, true, false, true, false,
ObjCMethodDecl::Required);
D->addInstanceMethod(CTORMethod);
CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, CTORMethod, true);
-
-
}
/// EmitNamespace - Emit all declarations in a namespace.
@@ -1990,7 +2031,8 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::CXXMethod:
case Decl::Function:
// Skip function templates
- if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate())
+ if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate() ||
+ cast<FunctionDecl>(D)->isLateTemplateParsed())
return;
EmitGlobal(cast<FunctionDecl>(D));
@@ -2000,6 +2042,11 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
EmitGlobal(cast<VarDecl>(D));
break;
+ // Indirect fields from global anonymous structs and unions can be
+ // ignored; only the actual variable requires IR gen support.
+ case Decl::IndirectField:
+ break;
+
// C++ Decls
case Decl::Namespace:
EmitNamespace(cast<NamespaceDecl>(D));
@@ -2014,12 +2061,15 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
break;
case Decl::CXXConstructor:
// Skip function templates
- if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate())
+ if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate() ||
+ cast<FunctionDecl>(D)->isLateTemplateParsed())
return;
EmitCXXConstructors(cast<CXXConstructorDecl>(D));
break;
case Decl::CXXDestructor:
+ if (cast<FunctionDecl>(D)->isLateTemplateParsed())
+ return;
EmitCXXDestructors(cast<CXXDestructorDecl>(D));
break;
@@ -2035,13 +2085,12 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::ObjCInterface:
break;
- case Decl::ObjCCategory: {
- ObjCCategoryDecl *CD = cast<ObjCCategoryDecl>(D);
- if (CD->IsClassExtension() && CD->hasSynthBitfield())
- Context.ResetObjCLayout(CD->getClassInterface());
- break;
- }
-
+ case Decl::ObjCCategory: {
+ ObjCCategoryDecl *CD = cast<ObjCCategoryDecl>(D);
+ if (CD->IsClassExtension() && CD->hasSynthBitfield())
+ Context.ResetObjCLayout(CD->getClassInterface());
+ break;
+ }
case Decl::ObjCProtocol:
Runtime->GenerateProtocol(cast<ObjCProtocolDecl>(D));
@@ -2118,7 +2167,7 @@ static void EmitGlobalDeclMetadata(CodeGenModule &CGM,
Addr,
GetPointerConstant(CGM.getLLVMContext(), D.getDecl())
};
- GlobalMetadata->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops, 2));
+ GlobalMetadata->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
}
/// Emits metadata nodes associating all the global values in the
@@ -2159,7 +2208,7 @@ void CodeGenFunction::EmitDeclMetadata() {
if (llvm::AllocaInst *Alloca = dyn_cast<llvm::AllocaInst>(Addr)) {
llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D);
- Alloca->setMetadata(DeclPtrKind, llvm::MDNode::get(Context, &DAddr, 1));
+ Alloca->setMetadata(DeclPtrKind, llvm::MDNode::get(Context, DAddr));
} else if (llvm::GlobalValue *GV = dyn_cast<llvm::GlobalValue>(Addr)) {
GlobalDecl GD = GlobalDecl(cast<VarDecl>(D));
EmitGlobalDeclMetadata(CGM, GlobalMetadata, GD, GV);
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index 73e6ece..99c973c 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -20,7 +20,6 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Mangle.h"
-#include "CGCall.h"
#include "CGVTables.h"
#include "CodeGenTypes.h"
#include "GlobalDecl.h"
@@ -69,12 +68,14 @@ namespace clang {
namespace CodeGen {
+ class CallArgList;
class CodeGenFunction;
class CodeGenTBAA;
class CGCXXABI;
class CGDebugInfo;
class CGObjCRuntime;
class BlockFieldFlags;
+ class FunctionArgList;
struct OrderGlobalInits {
unsigned int priority;
@@ -246,9 +247,6 @@ class CodeGenModule : public CodeGenTypeCache {
int GlobalUniqueCount;
} Block;
- llvm::DenseMap<uint64_t, llvm::Constant *> AssignCache;
- llvm::DenseMap<uint64_t, llvm::Constant *> DestroyCache;
-
/// @}
public:
CodeGenModule(ASTContext &C, const CodeGenOptions &CodeGenOpts,
@@ -281,7 +279,8 @@ public:
StaticLocalDeclMap[D] = GV;
}
- CGDebugInfo *getDebugInfo() { return DebugInfo; }
+ CGDebugInfo *getModuleDebugInfo() { return DebugInfo; }
+
ASTContext &getContext() const { return Context; }
const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; }
const LangOptions &getLangOptions() const { return Features; }
@@ -311,6 +310,7 @@ public:
enum TypeVisibilityKind {
TVK_ForVTT,
TVK_ForVTable,
+ TVK_ForConstructionVTable,
TVK_ForRTTI,
TVK_ForRTTIName
};
@@ -358,6 +358,7 @@ public:
llvm::Constant *GetAddrOfGlobalVar(const VarDecl *D,
const llvm::Type *Ty = 0);
+
/// GetAddrOfFunction - Return the address of the given function. If Ty is
/// non-null, then this function will use the specified type if it has to
/// create it.
@@ -382,14 +383,35 @@ public:
CastExpr::path_const_iterator PathBegin,
CastExpr::path_const_iterator PathEnd);
- llvm::Constant *BuildbyrefCopyHelper(const llvm::Type *T,
- BlockFieldFlags flags,
- unsigned Align,
- const VarDecl *variable);
- llvm::Constant *BuildbyrefDestroyHelper(const llvm::Type *T,
- BlockFieldFlags flags,
- unsigned Align,
- const VarDecl *variable);
+ /// A pair of helper functions for a __block variable.
+ class ByrefHelpers : public llvm::FoldingSetNode {
+ public:
+ llvm::Constant *CopyHelper;
+ llvm::Constant *DisposeHelper;
+
+ /// The alignment of the field. This is important because
+ /// different offsets to the field within the byref struct need to
+ /// have different helper functions.
+ CharUnits Alignment;
+
+ ByrefHelpers(CharUnits alignment) : Alignment(alignment) {}
+ virtual ~ByrefHelpers();
+
+ void Profile(llvm::FoldingSetNodeID &id) const {
+ id.AddInteger(Alignment.getQuantity());
+ profileImpl(id);
+ }
+ virtual void profileImpl(llvm::FoldingSetNodeID &id) const = 0;
+
+ virtual bool needsCopy() const { return true; }
+ virtual void emitCopy(CodeGenFunction &CGF,
+ llvm::Value *dest, llvm::Value *src) = 0;
+
+ virtual bool needsDispose() const { return true; }
+ virtual void emitDispose(CodeGenFunction &CGF, llvm::Value *field) = 0;
+ };
+
+ llvm::FoldingSet<ByrefHelpers> ByrefHelpersCache;
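
These helpers implement the copy/dispose contract for __block storage. A minimal user-level sketch that exercises them (requires -fblocks and the blocks runtime; demo is an invented name):

    #include <Block.h>
    void demo(void) {
      __block int counter = 0;                  // lives in a byref struct
      void (^inc)(void) = ^{ ++counter; };      // captures counter by reference
      void (^onHeap)(void) = Block_copy(inc);   // the copy helper runs here
      onHeap();
      Block_release(onHeap);                    // the dispose helper runs here
    }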
/// getUniqueBlockCount - Fetches the global unique block count.
int getUniqueBlockCount() { return ++Block.GlobalUniqueCount; }
@@ -437,7 +459,7 @@ public:
///
/// \param GlobalName If provided, the name to use for the global
/// (if one is created).
- llvm::Constant *GetAddrOfConstantString(const std::string& str,
+ llvm::Constant *GetAddrOfConstantString(llvm::StringRef Str,
const char *GlobalName=0);
/// GetAddrOfConstantCString - Returns a pointer to a character array
@@ -451,13 +473,15 @@ public:
/// GetAddrOfCXXConstructor - Return the address of the constructor of the
/// given type.
- llvm::GlobalValue *GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
- CXXCtorType Type);
+ llvm::GlobalValue *GetAddrOfCXXConstructor(const CXXConstructorDecl *ctor,
+ CXXCtorType ctorType,
+ const CGFunctionInfo *fnInfo = 0);
 /// GetAddrOfCXXDestructor - Return the address of the destructor of the
/// given type.
- llvm::GlobalValue *GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
- CXXDtorType Type);
+ llvm::GlobalValue *GetAddrOfCXXDestructor(const CXXDestructorDecl *dtor,
+ CXXDtorType dtorType,
+ const CGFunctionInfo *fnInfo = 0);
/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
@@ -502,10 +526,8 @@ public:
///@}
- void UpdateCompletedType(const TagDecl *TD) {
- // Make sure that this type is translated.
- Types.UpdateCompletedType(TD);
- }
+  // UpdateCompletedType - Make sure that this type is translated.
+ void UpdateCompletedType(const TagDecl *TD);
llvm::Constant *getMemberPointerConstant(const UnaryOperator *e);
@@ -523,6 +545,9 @@ public:
llvm::Constant *EmitAnnotateAttr(llvm::GlobalValue *GV,
const AnnotateAttr *AA, unsigned LineNo);
+ /// Error - Emit a general error that something can't be done.
+ void Error(SourceLocation loc, llvm::StringRef error);
+
/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
/// \param OmitOnError - If true, then this error should only be emitted if no
diff --git a/lib/CodeGen/CodeGenTBAA.cpp b/lib/CodeGen/CodeGenTBAA.cpp
index 3f2c6ca..53e40b2 100644
--- a/lib/CodeGen/CodeGenTBAA.cpp
+++ b/lib/CodeGen/CodeGenTBAA.cpp
@@ -74,7 +74,8 @@ llvm::MDNode *CodeGenTBAA::getTBAAInfoForNamedType(llvm::StringRef NameStr,
};
// Create the mdnode.
- return llvm::MDNode::get(VMContext, Ops, llvm::array_lengthof(Ops) - !Flags);
+ unsigned Len = llvm::array_lengthof(Ops) - !Flags;
+ return llvm::MDNode::get(VMContext, llvm::ArrayRef<llvm::Value*>(Ops, Len));
}
static bool TypeHasMayAlias(QualType QTy) {
@@ -155,7 +156,7 @@ CodeGenTBAA::getTBAAInfo(QualType QTy) {
// theoretically implement this by combining information about all the
// members into a single identifying MDNode.
if (!Features.CPlusPlus &&
- ETy->getDecl()->getTypedefForAnonDecl())
+ ETy->getDecl()->getTypedefNameForAnonDecl())
return MetadataCache[Ty] = getChar();
// In C++ mode, types have linkage, so we can rely on the ODR and
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index 5254922..8db6fe5 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -65,6 +65,36 @@ void CodeGenTypes::HandleLateResolvedPointers() {
}
}
+void CodeGenTypes::addRecordTypeName(const RecordDecl *RD, const llvm::Type *Ty,
+ llvm::StringRef suffix) {
+ llvm::SmallString<256> TypeName;
+ llvm::raw_svector_ostream OS(TypeName);
+ OS << RD->getKindName() << '.';
+
+ // Name the codegen type after the typedef name
+ // if there is no tag type name available
+ if (RD->getIdentifier()) {
+ // FIXME: We should not have to check for a null decl context here.
+ // Right now we do it because the implicit Obj-C decls don't have one.
+ if (RD->getDeclContext())
+ OS << RD->getQualifiedNameAsString();
+ else
+ RD->printName(OS);
+ } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
+ // FIXME: We should not have to check for a null decl context here.
+ // Right now we do it because the implicit Obj-C decls don't have one.
+ if (TDD->getDeclContext())
+ OS << TDD->getQualifiedNameAsString();
+ else
+ TDD->printName(OS);
+ } else
+ OS << "anon";
+
+ if (!suffix.empty())
+ OS << suffix;
+
+ TheModule.addTypeName(OS.str(), Ty);
+}
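
A hedged illustration of the names this produces (the record names are invented):

    struct Point { int x, y; };        // IR type is named "struct.Point"
    typedef struct { int a; } Coord;   // tagless record with a typedef name:
                                       // named "struct.Coord"
    // a record with neither a tag nor a typedef name falls back to
    // "struct.anon"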
/// ConvertType - Convert the specified type to its LLVM form.
const llvm::Type *CodeGenTypes::ConvertType(QualType T, bool IsRecursive) {
@@ -199,7 +229,7 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
- assert(false && "Non-canonical or dependent types aren't possible.");
+ llvm_unreachable("Non-canonical or dependent types aren't possible.");
break;
case Type::Builtin: {
@@ -253,10 +283,12 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
case BuiltinType::Overload:
case BuiltinType::Dependent:
- assert(0 && "Unexpected builtin type!");
+ case BuiltinType::BoundMember:
+ case BuiltinType::UnknownAny:
+ llvm_unreachable("Unexpected placeholder builtin type!");
break;
}
- assert(0 && "Unknown builtin type!");
+ llvm_unreachable("Unknown builtin type!");
break;
}
case Type::Complex: {
@@ -270,14 +302,16 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
QualType ETy = RTy.getPointeeType();
llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
- return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
+ unsigned AS = Context.getTargetAddressSpace(ETy);
+ return llvm::PointerType::get(PointeeType, AS);
}
case Type::Pointer: {
const PointerType &PTy = cast<PointerType>(Ty);
QualType ETy = PTy.getPointeeType();
llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
- return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
+ unsigned AS = Context.getTargetAddressSpace(ETy);
+ return llvm::PointerType::get(PointeeType, AS);
}
case Type::VariableArray: {
@@ -371,30 +405,8 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
const TagDecl *TD = cast<TagType>(Ty).getDecl();
const llvm::Type *Res = ConvertTagDeclType(TD);
- llvm::SmallString<256> TypeName;
- llvm::raw_svector_ostream OS(TypeName);
- OS << TD->getKindName() << '.';
-
- // Name the codegen type after the typedef name
- // if there is no tag type name available
- if (TD->getIdentifier()) {
- // FIXME: We should not have to check for a null decl context here.
- // Right now we do it because the implicit Obj-C decls don't have one.
- if (TD->getDeclContext())
- OS << TD->getQualifiedNameAsString();
- else
- TD->printName(OS);
- } else if (const TypedefDecl *TDD = TD->getTypedefForAnonDecl()) {
- // FIXME: We should not have to check for a null decl context here.
- // Right now we do it because the implicit Obj-C decls don't have one.
- if (TDD->getDeclContext())
- OS << TDD->getQualifiedNameAsString();
- else
- TDD->printName(OS);
- } else
- OS << "anon";
-
- TheModule.addTypeName(OS.str(), Res);
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(TD))
+ addRecordTypeName(RD, Res, llvm::StringRef());
return Res;
}
@@ -402,7 +414,8 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
const QualType FTy = cast<BlockPointerType>(Ty).getPointeeType();
llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
PointersToResolve.push_back(std::make_pair(FTy, PointeeType));
- return llvm::PointerType::get(PointeeType, FTy.getAddressSpace());
+ unsigned AS = Context.getTargetAddressSpace(FTy);
+ return llvm::PointerType::get(PointeeType, AS);
}
case Type::MemberPointer: {
@@ -500,6 +513,15 @@ CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
return *Layout;
}
+void CodeGenTypes::addBaseSubobjectTypeName(const CXXRecordDecl *RD,
+ const CGRecordLayout &layout) {
+ llvm::StringRef suffix;
+ if (layout.getBaseSubobjectLLVMType() != layout.getLLVMType())
+ suffix = ".base";
+
+ addRecordTypeName(RD, layout.getBaseSubobjectLLVMType(), suffix);
+}
+
bool CodeGenTypes::isZeroInitializable(QualType T) {
// No need to check for member pointers when not compiling C++.
if (!Context.getLangOptions().CPlusPlus)
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index 41513da..dc383cb 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -101,6 +101,11 @@ private:
/// used to handle cyclic structures properly.
void HandleLateResolvedPointers();
+ /// addRecordTypeName - Compute a name from the given record decl with an
+ /// optional suffix and name the given LLVM type using it.
+ void addRecordTypeName(const RecordDecl *RD, const llvm::Type *Ty,
+ llvm::StringRef suffix);
+
public:
CodeGenTypes(ASTContext &Ctx, llvm::Module &M, const llvm::TargetData &TD,
const ABIInfo &Info, CGCXXABI &CXXABI);
@@ -145,10 +150,19 @@ public:
const CGRecordLayout &getCGRecordLayout(const RecordDecl*);
+ /// addBaseSubobjectTypeName - Add a type name for the base subobject of the
+ /// given record layout.
+ void addBaseSubobjectTypeName(const CXXRecordDecl *RD,
+ const CGRecordLayout &layout);
+
/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void UpdateCompletedType(const TagDecl *TD);
+ /// getNullaryFunctionInfo - Get the function info for a void()
+ /// function with standard CC.
+ const CGFunctionInfo &getNullaryFunctionInfo();
+
/// getFunctionInfo - Get the function info for the specified function decl.
const CGFunctionInfo &getFunctionInfo(GlobalDecl GD);
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index 95654a3..33abf3a 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This provides C++ code generation targetting the Itanium C++ ABI. The class
+// This provides C++ code generation targeting the Itanium C++ ABI. The class
// in this file generates structures that follow the Itanium C++ ABI, which is
// documented at:
// http://www.codesourcery.com/public/cxx-abi/abi.html
@@ -282,8 +282,7 @@ ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
// We're done.
CGF.EmitBlock(FnEnd);
- llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo());
- Callee->reserveOperandSpace(2);
+ llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo(), 2);
Callee->addIncoming(VirtualFn, FnVirtual);
Callee->addIncoming(NonVirtualFn, FnNonVirtual);
return Callee;
@@ -515,10 +514,10 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
if (MD->isVirtual()) {
uint64_t Index = CGM.getVTables().getMethodVTableIndex(MD);
- // FIXME: We shouldn't use / 8 here.
- uint64_t PointerWidthInBytes =
- getContext().Target.getPointerWidth(0) / 8;
- uint64_t VTableOffset = (Index * PointerWidthInBytes);
+ const ASTContext &Context = getContext();
+ CharUnits PointerWidth =
+ Context.toCharUnitsFromBits(Context.Target.getPointerWidth(0));
+ uint64_t VTableOffset = (Index * PointerWidth.getQuantity());
if (IsARM) {
// ARM C++ ABI 3.2.1:
@@ -538,20 +537,21 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, 0);
}
} else {
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
const llvm::Type *Ty;
// Check whether the function has a computable LLVM signature.
if (!CodeGenTypes::VerifyFuncTypeComplete(FPT)) {
// The function has a computable LLVM signature; use the correct type.
- Ty = Types.GetFunctionType(Types.getFunctionInfo(MD), FPT->isVariadic());
+ Ty = Types.GetFunctionType(Types.getFunctionInfo(MD),
+ FPT->isVariadic());
} else {
// Use an arbitrary non-function type to tell GetAddrOfFunction that the
// function type is incomplete.
Ty = ptrdiff_t;
}
+ llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
- llvm::Constant *Addr = CGM.GetAddrOfFunction(MD, Ty);
- MemPtr[0] = llvm::ConstantExpr::getPtrToInt(Addr, ptrdiff_t);
+ MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, ptrdiff_t);
MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, 0);
}
@@ -651,20 +651,21 @@ ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
}
- // In Itanium, a member function pointer is null if 'ptr' is null.
+ // In Itanium, a member function pointer is not null if 'ptr' is not null.
llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
- // In ARM, it's that, plus the low bit of 'adj' must be zero.
+ // On ARM, a member function pointer is also non-null if the low bit of 'adj'
+ // (the virtual bit) is set.
if (IsARM) {
llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
- llvm::Value *IsNotVirtual = Builder.CreateICmpEQ(VirtualBit, Zero,
- "memptr.notvirtual");
- Result = Builder.CreateAnd(Result, IsNotVirtual);
+ llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
+ "memptr.isvirtual");
+ Result = Builder.CreateOr(Result, IsVirtual);
}
return Result;
@@ -745,7 +746,7 @@ void ItaniumCXXABI::BuildInstanceFunctionParams(CodeGenFunction &CGF,
ImplicitParamDecl *VTTDecl
= ImplicitParamDecl::Create(Context, 0, MD->getLocation(),
&Context.Idents.get("vtt"), T);
- Params.push_back(std::make_pair(VTTDecl, VTTDecl->getType()));
+ Params.push_back(VTTDecl);
getVTTDecl(CGF) = VTTDecl;
}
}
@@ -757,7 +758,7 @@ void ARMCXXABI::BuildInstanceFunctionParams(CodeGenFunction &CGF,
// Return 'this' from certain constructors and destructors.
if (HasThisReturn(CGF.CurGD))
- ResTy = Params[0].second;
+ ResTy = Params[0]->getType();
}
void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
@@ -1064,10 +1065,18 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// global initialization is always single-threaded.
bool ThreadsafeStatics = (getContext().getLangOptions().ThreadsafeStatics &&
D.isLocalVarDecl());
-
- // Guard variables are 64 bits in the generic ABI and 32 bits on ARM.
- const llvm::IntegerType *GuardTy
- = (IsARM ? Builder.getInt32Ty() : Builder.getInt64Ty());
+
+ const llvm::IntegerType *GuardTy;
+
+ // If we have a global variable with internal linkage and thread-safe statics
+ // are disabled, we can just let the guard variable be of type i8.
+ bool UseInt8GuardVariable = !ThreadsafeStatics && GV->hasInternalLinkage();
+ if (UseInt8GuardVariable)
+ GuardTy = Builder.getInt8Ty();
+ else {
+ // Guard variables are 64 bits in the generic ABI and 32 bits on ARM.
+ GuardTy = (IsARM ? Builder.getInt32Ty() : Builder.getInt64Ty());
+ }
const llvm::PointerType *GuardPtrTy = GuardTy->getPointerTo();
// Create the guard variable.
@@ -1098,7 +1107,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// if (__cxa_guard_acquire(&obj_guard))
// ...
// }
- if (IsARM) {
+ if (IsARM && !UseInt8GuardVariable) {
llvm::Value *V = Builder.CreateLoad(GuardVariable);
V = Builder.CreateAnd(V, Builder.getInt32(1));
IsInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
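
A compilable sketch of the case the i8 fast path above targets (names invented): with thread-safe statics disabled, a static local whose guard has internal linkage needs only a one-byte "initialized" flag instead of the full 64-bit (32-bit on ARM) Itanium guard word.

    namespace {
      struct Expensive { Expensive() {} };
      Expensive &get() {
        static Expensive obj;  // internal linkage; with -fno-threadsafe-statics
        return obj;            // the guard variable can be a plain i8
      }
    }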
diff --git a/lib/CodeGen/MicrosoftCXXABI.cpp b/lib/CodeGen/MicrosoftCXXABI.cpp
index 3a63eba..747e5e3 100644
--- a/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This provides C++ code generation targetting the Microsoft Visual C++ ABI.
+// This provides C++ code generation targeting the Microsoft Visual C++ ABI.
// The class in this file generates structures that follow the Microsoft
// Visual C++ ABI, which is actually not very well documented at all outside
// of Microsoft.
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 2ffc840..bc2472c 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -16,6 +16,7 @@
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Type.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/Triple.h"
@@ -358,7 +359,7 @@ bool UseX86_MMXType(const llvm::Type *IRType) {
static const llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
llvm::StringRef Constraint,
const llvm::Type* Ty) {
- if (Constraint=="y" && Ty->isVectorTy())
+ if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy())
return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
return Ty;
}
@@ -864,6 +865,15 @@ class X86_64ABIInfo : public ABIInfo {
unsigned &neededInt,
unsigned &neededSSE) const;
+ /// The 0.98 ABI revision clarified a lot of ambiguities,
+ /// unfortunately in ways that were not always consistent with
+ /// certain previous compilers. In particular, platforms which
+ /// required strict binary compatibility with older versions of GCC
+ /// may need to exempt themselves.
+ bool honorsRevision0_98() const {
+ return !getContext().Target.getTriple().isOSDarwin();
+ }
+
public:
X86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
@@ -1252,15 +1262,24 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
// (a) If one of the classes is MEMORY, the whole argument is
// passed in memory.
//
- // (b) If SSEUP is not preceeded by SSE, it is converted to SSE.
-
- // The first of these conditions is guaranteed by how we implement
- // the merge (just bail).
+ // (b) If X87UP is not preceded by X87, the whole argument is
+ // passed in memory.
+ //
+ // (c) If the size of the aggregate exceeds two eightbytes and the first
+  //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
+ // argument is passed in memory.
+ //
+ // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
+ //
+ // Some of these are enforced by the merging logic. Others can arise
+ // only with unions; for example:
+ // union { _Complex double; unsigned; }
//
- // The second condition occurs in the case of unions; for example
- // union { _Complex double; unsigned; }.
+ // Note that clauses (b) and (c) were added in 0.98.
if (Hi == Memory)
Lo = Memory;
+ if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
+ Lo = Memory;
if (Hi == SSEUp && Lo != SSE)
Hi = SSE;
}
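
In compilable form (member names invented), the union the comment cites looks like this; merging the classes its members impose on the shared eightbytes is what can leave a pattern the post-merge clauses above must repair:

    union Mixed {
      _Complex double cd;  // classifies each of its two eightbytes as SSE
      unsigned u;          // classifies the low eightbyte as INTEGER
    };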
@@ -1689,7 +1708,7 @@ classifyReturnType(QualType RetTy) const {
// AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
// is passed in the upper half of the last used SSE register.
//
- // SSEUP should always be preceeded by SSE, just widen.
+ // SSEUP should always be preceded by SSE, just widen.
case SSEUp:
assert(Lo == SSE && "Unexpected SSEUp classification.");
ResType = Get16ByteVectorType(RetTy);
@@ -1698,9 +1717,9 @@ classifyReturnType(QualType RetTy) const {
// AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
// returned together with the previous X87 value in %st0.
case X87Up:
- // If X87Up is preceeded by X87, we don't need to do
+ // If X87Up is preceded by X87, we don't need to do
// anything. However, in some cases with unions it may not be
- // preceeded by X87. In such situations we follow gcc and pass the
+ // preceded by X87. In such situations we follow gcc and pass the
// extra bits in an SSE reg.
if (Lo != X87) {
HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy),
@@ -1799,7 +1818,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
const llvm::Type *HighPart = 0;
switch (Hi) {
// Memory was handled previously, ComplexX87 and X87 should
- // never occur as hi classes, and X87Up must be preceed by X87,
+ // never occur as hi classes, and X87Up must be preceded by X87,
// which is passed in memory.
case Memory:
case X87:
@@ -2082,9 +2101,8 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// Return the appropriate result.
CGF.EmitBlock(ContBlock);
- llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
+ llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
"vaarg.addr");
- ResAddr->reserveOperandSpace(2);
ResAddr->addIncoming(RegAddr, InRegBlock);
ResAddr->addIncoming(MemAddr, InMemBlock);
return ResAddr;
@@ -2271,27 +2289,33 @@ void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
it != ie; ++it)
it->info = classifyArgumentType(it->type);
- const llvm::Triple &Triple(getContext().Target.getTriple());
+ // Always honor user-specified calling convention.
+ if (FI.getCallingConvention() != llvm::CallingConv::C)
+ return;
+
+  // Otherwise, use the calling convention the ABI designates as the default.
llvm::CallingConv::ID DefaultCC;
- if (Triple.getEnvironmentName() == "gnueabi" ||
- Triple.getEnvironmentName() == "eabi")
+ llvm::StringRef Env = getContext().Target.getTriple().getEnvironmentName();
+ if (Env == "gnueabi" || Env == "eabi")
DefaultCC = llvm::CallingConv::ARM_AAPCS;
else
DefaultCC = llvm::CallingConv::ARM_APCS;
+  // If the user did not explicitly ask for a specific calling convention (e.g.
+  // via the pcs attribute), set the effective calling convention if it differs
+  // from the ABI default.
switch (getABIKind()) {
case APCS:
if (DefaultCC != llvm::CallingConv::ARM_APCS)
FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
break;
-
case AAPCS:
if (DefaultCC != llvm::CallingConv::ARM_AAPCS)
FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
break;
-
case AAPCS_VFP:
- FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
+ if (DefaultCC != llvm::CallingConv::ARM_AAPCS_VFP)
+ FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
break;
}
}
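
The early return above is what lets an explicit request win; a hedged sketch using the pcs attribute the comment mentions (dot2 is an invented name):

    // The declared convention overrides the triple's APCS/AAPCS default:
    __attribute__((pcs("aapcs-vfp"))) float dot2(float a, float b);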
@@ -2317,22 +2341,26 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
// Otherwise, pass by coercing to a structure of the appropriate size.
//
- // FIXME: This is kind of nasty... but there isn't much choice because the ARM
- // backend doesn't support byval.
// FIXME: This doesn't handle alignment > 64 bits.
const llvm::Type* ElemTy;
unsigned SizeRegs;
- if (getContext().getTypeAlign(Ty) > 32) {
- ElemTy = llvm::Type::getInt64Ty(getVMContext());
- SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
- } else {
+ if (getContext().getTypeSizeInChars(Ty) <= CharUnits::fromQuantity(64)) {
ElemTy = llvm::Type::getInt32Ty(getVMContext());
SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+ } else if (getABIKind() == ARMABIInfo::APCS) {
+ // Initial ARM ByVal support is APCS-only.
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
+ } else {
+ // FIXME: This is kind of nasty... but there isn't much choice
+ // because most of the ARM calling conventions don't yet support
+ // byval.
+ ElemTy = llvm::Type::getInt64Ty(getVMContext());
+ SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
}
- std::vector<const llvm::Type*> LLVMFields;
- LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
- const llvm::Type* STy = llvm::StructType::get(getVMContext(), LLVMFields,
- true);
+
+ const llvm::Type *STy =
+ llvm::StructType::get(getVMContext(),
+ llvm::ArrayType::get(ElemTy, SizeRegs), NULL, NULL);
return ABIArgInfo::getDirect(STy);
}
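
A concrete instance of the coercion above (RGB is an invented name): a 12-byte aggregate is within the 64-byte limit, so it is passed directly as a structure of i32 elements.

    struct RGB { int r, g, b; };  // 96 bits: SizeRegs = (96 + 31) / 32 = 3,
                                  // coerced to { [3 x i32] }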
@@ -2516,6 +2544,74 @@ llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
}
//===----------------------------------------------------------------------===//
+// PTX ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class PTXABIInfo : public ABIInfo {
+public:
+ PTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CFG) const;
+};
+
+class PTXTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ PTXTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new PTXABIInfo(CGT)) {}
+};
+
+ABIArgInfo PTXABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+ if (isAggregateTypeForABI(RetTy))
+ return ABIArgInfo::getIndirect(0);
+ return ABIArgInfo::getDirect();
+}
+
+ABIArgInfo PTXABIInfo::classifyArgumentType(QualType Ty) const {
+ if (isAggregateTypeForABI(Ty))
+ return ABIArgInfo::getIndirect(0);
+
+ return ABIArgInfo::getDirect();
+}
+
+void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+
+ // Always honor user-specified calling convention.
+ if (FI.getCallingConvention() != llvm::CallingConv::C)
+ return;
+
+  // Otherwise, use the calling convention the ABI designates as the default.
+ llvm::CallingConv::ID DefaultCC;
+ llvm::StringRef Env = getContext().Target.getTriple().getEnvironmentName();
+ if (Env == "device")
+ DefaultCC = llvm::CallingConv::PTX_Device;
+ else
+ DefaultCC = llvm::CallingConv::PTX_Kernel;
+
+ FI.setEffectiveCallingConvention(DefaultCC);
+}
+
+llvm::Value *PTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CFG) const {
+ llvm_unreachable("PTX does not support varargs");
+ return 0;
+}
+
+}
+
+//===----------------------------------------------------------------------===//
// SystemZ ABI Implementation
//===----------------------------------------------------------------------===//
@@ -2815,17 +2911,24 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::arm:
case llvm::Triple::thumb:
- // FIXME: We want to know the float calling convention as well.
- if (strcmp(getContext().Target.getABI(), "apcs-gnu") == 0)
- return *(TheTargetCodeGenInfo =
- new ARMTargetCodeGenInfo(Types, ARMABIInfo::APCS));
+ {
+ ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
- return *(TheTargetCodeGenInfo =
- new ARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS));
+ if (strcmp(getContext().Target.getABI(), "apcs-gnu") == 0)
+ Kind = ARMABIInfo::APCS;
+ else if (CodeGenOpts.FloatABI == "hard")
+ Kind = ARMABIInfo::AAPCS_VFP;
+
+ return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind));
+ }
case llvm::Triple::ppc:
return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
+ case llvm::Triple::ptx32:
+ case llvm::Triple::ptx64:
+ return *(TheTargetCodeGenInfo = new PTXTargetCodeGenInfo(Types));
+
case llvm::Triple::systemz:
return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));
@@ -2836,10 +2939,11 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
case llvm::Triple::x86:
- switch (Triple.getOS()) {
- case llvm::Triple::Darwin:
+ if (Triple.isOSDarwin())
return *(TheTargetCodeGenInfo =
new X86_32TargetCodeGenInfo(Types, true, true));
+
+ switch (Triple.getOS()) {
case llvm::Triple::Cygwin:
case llvm::Triple::MinGW32:
case llvm::Triple::AuroraUX: