summaryrefslogtreecommitdiffstats
path: root/lib/CodeGen
diff options
context:
space:
mode:
Diffstat (limited to 'lib/CodeGen')
-rw-r--r--lib/CodeGen/BackendUtil.cpp75
-rw-r--r--lib/CodeGen/CGBlocks.cpp64
-rw-r--r--lib/CodeGen/CGBuiltin.cpp59
-rw-r--r--lib/CodeGen/CGCXX.cpp24
-rw-r--r--lib/CodeGen/CGCall.cpp120
-rw-r--r--lib/CodeGen/CGCall.h9
-rw-r--r--lib/CodeGen/CGClass.cpp262
-rw-r--r--lib/CodeGen/CGDebugInfo.cpp60
-rw-r--r--lib/CodeGen/CGDebugInfo.h2
-rw-r--r--lib/CodeGen/CGDecl.cpp19
-rw-r--r--lib/CodeGen/CGDeclCXX.cpp12
-rw-r--r--lib/CodeGen/CGException.cpp188
-rw-r--r--lib/CodeGen/CGExpr.cpp5
-rw-r--r--lib/CodeGen/CGExprAgg.cpp2
-rw-r--r--lib/CodeGen/CGExprCXX.cpp437
-rw-r--r--lib/CodeGen/CGExprConstant.cpp11
-rw-r--r--lib/CodeGen/CGExprScalar.cpp100
-rw-r--r--lib/CodeGen/CGObjC.cpp100
-rw-r--r--lib/CodeGen/CGObjCGNU.cpp99
-rw-r--r--lib/CodeGen/CGObjCMac.cpp755
-rw-r--r--lib/CodeGen/CGObjCRuntime.cpp13
-rw-r--r--lib/CodeGen/CGObjCRuntime.h7
-rw-r--r--lib/CodeGen/CGRecordLayoutBuilder.cpp8
-rw-r--r--lib/CodeGen/CGStmt.cpp12
-rw-r--r--lib/CodeGen/CGVTT.cpp2
-rw-r--r--lib/CodeGen/CGVTables.cpp183
-rw-r--r--lib/CodeGen/CodeGenFunction.cpp64
-rw-r--r--lib/CodeGen/CodeGenFunction.h25
-rw-r--r--lib/CodeGen/CodeGenModule.cpp189
-rw-r--r--lib/CodeGen/CodeGenModule.h10
-rw-r--r--lib/CodeGen/CodeGenTypes.h5
-rw-r--r--lib/CodeGen/ItaniumCXXABI.cpp14
-rw-r--r--lib/CodeGen/ModuleBuilder.cpp2
-rw-r--r--lib/CodeGen/TargetInfo.cpp90
34 files changed, 1945 insertions, 1082 deletions
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index 1264473..01d15ff 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -21,12 +21,11 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/PrettyStackTrace.h"
-#include "llvm/Support/StandardPasses.h"
+#include "llvm/Support/PassManagerBuilder.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/SubtargetFeature.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegistry.h"
@@ -109,67 +108,62 @@ void EmitAssemblyHelper::CreatePasses() {
OptLevel = 0;
Inlining = CodeGenOpts.NoInlining;
}
-
- FunctionPassManager *FPM = getPerFunctionPasses();
-
- TargetLibraryInfo *TLI =
- new TargetLibraryInfo(Triple(TheModule->getTargetTriple()));
+
+ PassManagerBuilder PMBuilder;
+ PMBuilder.OptLevel = OptLevel;
+ PMBuilder.SizeLevel = CodeGenOpts.OptimizeSize;
+
+ PMBuilder.DisableSimplifyLibCalls = !CodeGenOpts.SimplifyLibCalls;
+ PMBuilder.DisableUnitAtATime = !CodeGenOpts.UnitAtATime;
+ PMBuilder.DisableUnrollLoops = !CodeGenOpts.UnrollLoops;
+
+ // Figure out TargetLibraryInfo.
+ Triple TargetTriple(TheModule->getTargetTriple());
+ PMBuilder.LibraryInfo = new TargetLibraryInfo(TargetTriple);
if (!CodeGenOpts.SimplifyLibCalls)
- TLI->disableAllFunctions();
- FPM->add(TLI);
-
- // In -O0 if checking is disabled, we don't even have per-function passes.
- if (CodeGenOpts.VerifyModule)
- FPM->add(createVerifierPass());
-
- // Assume that standard function passes aren't run for -O0.
- if (OptLevel > 0)
- llvm::createStandardFunctionPasses(FPM, OptLevel);
-
- llvm::Pass *InliningPass = 0;
+ PMBuilder.LibraryInfo->disableAllFunctions();
+
switch (Inlining) {
case CodeGenOptions::NoInlining: break;
case CodeGenOptions::NormalInlining: {
- // Set the inline threshold following llvm-gcc.
- //
// FIXME: Derive these constants in a principled fashion.
unsigned Threshold = 225;
- if (CodeGenOpts.OptimizeSize == 1) //-Os
+ if (CodeGenOpts.OptimizeSize == 1) // -Os
Threshold = 75;
- else if (CodeGenOpts.OptimizeSize == 2) //-Oz
+ else if (CodeGenOpts.OptimizeSize == 2) // -Oz
Threshold = 25;
else if (OptLevel > 2)
Threshold = 275;
- InliningPass = createFunctionInliningPass(Threshold);
+ PMBuilder.Inliner = createFunctionInliningPass(Threshold);
break;
}
case CodeGenOptions::OnlyAlwaysInlining:
- InliningPass = createAlwaysInlinerPass(); // Respect always_inline
+ // Respect always_inline.
+ PMBuilder.Inliner = createAlwaysInlinerPass();
break;
}
- PassManager *MPM = getPerModulePasses();
+
+ // Set up the per-function pass manager.
+ FunctionPassManager *FPM = getPerFunctionPasses();
+ if (CodeGenOpts.VerifyModule)
+ FPM->add(createVerifierPass());
+ PMBuilder.populateFunctionPassManager(*FPM);
- TLI = new TargetLibraryInfo(Triple(TheModule->getTargetTriple()));
- if (!CodeGenOpts.SimplifyLibCalls)
- TLI->disableAllFunctions();
- MPM->add(TLI);
+ // Set up the per-module pass manager.
+ PassManager *MPM = getPerModulePasses();
if (CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes) {
MPM->add(createGCOVProfilerPass(CodeGenOpts.EmitGcovNotes,
- CodeGenOpts.EmitGcovArcs));
+ CodeGenOpts.EmitGcovArcs,
+ TargetTriple.isMacOSX()));
+
if (!CodeGenOpts.DebugInfo)
MPM->add(createStripSymbolsPass(true));
}
-
- // For now we always create per module passes.
- llvm::createStandardModulePasses(MPM, OptLevel,
- CodeGenOpts.OptimizeSize,
- CodeGenOpts.UnitAtATime,
- CodeGenOpts.UnrollLoops,
- CodeGenOpts.SimplifyLibCalls,
- /*HaveExceptions=*/true,
- InliningPass);
+
+
+ PMBuilder.populateModulePassManager(*MPM);
}
bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
@@ -215,7 +209,6 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
llvm::UnsafeFPMath = CodeGenOpts.UnsafeFPMath;
llvm::UseSoftFloat = CodeGenOpts.SoftFloat;
- UnwindTablesMandatory = CodeGenOpts.UnwindTables;
TargetMachine::setAsmVerbosityDefault(CodeGenOpts.AsmVerbose);
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 99a69a4..e5da703 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -189,23 +189,6 @@ namespace {
}
}
-/// Determines if the given record type has a mutable field.
-static bool hasMutableField(const CXXRecordDecl *record) {
- for (CXXRecordDecl::field_iterator
- i = record->field_begin(), e = record->field_end(); i != e; ++i)
- if ((*i)->isMutable())
- return true;
-
- for (CXXRecordDecl::base_class_const_iterator
- i = record->bases_begin(), e = record->bases_end(); i != e; ++i) {
- const RecordType *record = i->getType()->castAs<RecordType>();
- if (hasMutableField(cast<CXXRecordDecl>(record->getDecl())))
- return true;
- }
-
- return false;
-}
-
/// Determines if the given type is safe for constant capture in C++.
static bool isSafeForCXXConstantCapture(QualType type) {
const RecordType *recordType =
@@ -222,7 +205,7 @@ static bool isSafeForCXXConstantCapture(QualType type) {
// Otherwise, we just have to make sure there aren't any mutable
// fields that might have changed since initialization.
- return !hasMutableField(record);
+ return !record->hasMutableFields();
}
/// It is illegal to modify a const object after initialization.
@@ -262,7 +245,7 @@ static CharUnits getLowBit(CharUnits v) {
}
static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
- std::vector<const llvm::Type*> &elementTypes) {
+ llvm::SmallVectorImpl<const llvm::Type*> &elementTypes) {
ASTContext &C = CGM.getContext();
// The header is basically a 'struct { void *; int; int; void *; void *; }'.
@@ -299,7 +282,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
ASTContext &C = CGM.getContext();
const BlockDecl *block = info.getBlockDecl();
- std::vector<const llvm::Type*> elementTypes;
+ llvm::SmallVector<const llvm::Type*, 8> elementTypes;
initializeForBlockHeader(CGM, info, elementTypes);
if (!block->hasCaptures()) {
@@ -321,7 +304,11 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
const DeclContext *DC = block->getDeclContext();
for (; isa<BlockDecl>(DC); DC = cast<BlockDecl>(DC)->getDeclContext())
;
- QualType thisType = cast<CXXMethodDecl>(DC)->getThisType(C);
+ QualType thisType;
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC))
+ thisType = C.getPointerType(C.getRecordType(RD));
+ else
+ thisType = cast<CXXMethodDecl>(DC)->getThisType(C);
const llvm::Type *llvmType = CGM.getTypes().ConvertType(thisType);
std::pair<CharUnits,CharUnits> tinfo
@@ -720,9 +707,8 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
BlockLiteral = Builder.CreateBitCast(BlockLiteral, VoidPtrTy, "tmp");
// Add the block literal.
- QualType VoidPtrTy = getContext().getPointerType(getContext().VoidTy);
CallArgList Args;
- Args.add(RValue::get(BlockLiteral), VoidPtrTy);
+ Args.add(RValue::get(BlockLiteral), getContext().VoidPtrTy);
QualType FnType = BPT->getPointeeType();
@@ -1063,6 +1049,10 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
IdentifierInfo *II
= &CGM.getContext().Idents.get("__copy_helper_block_");
+ // Check if we should generate debug info for this block helper function.
+ if (CGM.getModuleDebugInfo())
+ DebugInfo = CGM.getModuleDebugInfo();
+
FunctionDecl *FD = FunctionDecl::Create(C,
C.getTranslationUnitDecl(),
SourceLocation(),
@@ -1150,6 +1140,10 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
"__destroy_helper_block_", &CGM.getModule());
+ // Check if we should generate debug info for this block destroy function.
+ if (CGM.getModuleDebugInfo())
+ DebugInfo = CGM.getModuleDebugInfo();
+
IdentifierInfo *II
= &CGM.getContext().Idents.get("__destroy_helper_block_");
@@ -1508,29 +1502,29 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
QualType Ty = D->getType();
- std::vector<const llvm::Type *> Types;
+ llvm::SmallVector<const llvm::Type *, 8> types;
llvm::PATypeHolder ByRefTypeHolder = llvm::OpaqueType::get(getLLVMContext());
// void *__isa;
- Types.push_back(Int8PtrTy);
+ types.push_back(Int8PtrTy);
// void *__forwarding;
- Types.push_back(llvm::PointerType::getUnqual(ByRefTypeHolder));
+ types.push_back(llvm::PointerType::getUnqual(ByRefTypeHolder));
// int32_t __flags;
- Types.push_back(Int32Ty);
+ types.push_back(Int32Ty);
// int32_t __size;
- Types.push_back(Int32Ty);
+ types.push_back(Int32Ty);
bool HasCopyAndDispose = getContext().BlockRequiresCopying(Ty);
if (HasCopyAndDispose) {
/// void *__copy_helper;
- Types.push_back(Int8PtrTy);
+ types.push_back(Int8PtrTy);
/// void *__destroy_helper;
- Types.push_back(Int8PtrTy);
+ types.push_back(Int8PtrTy);
}
bool Packed = false;
@@ -1553,11 +1547,11 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
if (NumPaddingBytes > 0) {
const llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext());
// FIXME: We need a sema error for alignment larger than the minimum of
- // the maximal stack alignmint and the alignment of malloc on the system.
+ // the maximal stack alignment and the alignment of malloc on the system.
if (NumPaddingBytes > 1)
Ty = llvm::ArrayType::get(Ty, NumPaddingBytes);
- Types.push_back(Ty);
+ types.push_back(Ty);
// We want a packed struct.
Packed = true;
@@ -1565,9 +1559,9 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
}
// T x;
- Types.push_back(ConvertTypeForMem(Ty));
+ types.push_back(ConvertTypeForMem(Ty));
- const llvm::Type *T = llvm::StructType::get(getLLVMContext(), Types, Packed);
+ const llvm::Type *T = llvm::StructType::get(getLLVMContext(), types, Packed);
cast<llvm::OpaqueType>(ByRefTypeHolder.get())->refineAbstractTypeTo(T);
CGM.getModule().addTypeName("struct.__block_byref_" + D->getNameAsString(),
@@ -1575,7 +1569,7 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
Info.first = ByRefTypeHolder.get();
- Info.second = Types.size() - 1;
+ Info.second = types.size() - 1;
return Info.first;
}
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 7a0c8da..14bebaf 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -164,9 +164,8 @@ static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
}
// The prototype is something that takes and returns whatever V's type is.
- std::vector<const llvm::Type*> Args;
- Args.push_back(V->getType());
- llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), Args, false);
+ llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(),
+ false);
llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);
return CGF.Builder.CreateCall(Fn, V, "abs");
@@ -1186,6 +1185,41 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops.begin(), Ops.end());
}
+ if (BuiltinID == ARM::BI__builtin_arm_ldrexd) {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
+
+ Value *LdPtr = EmitScalarExpr(E->getArg(0));
+ Value *Val = Builder.CreateCall(F, LdPtr, "ldrexd");
+
+ Value *Val0 = Builder.CreateExtractValue(Val, 1);
+ Value *Val1 = Builder.CreateExtractValue(Val, 0);
+ Val0 = Builder.CreateZExt(Val0, Int64Ty);
+ Val1 = Builder.CreateZExt(Val1, Int64Ty);
+
+ Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
+ Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
+ return Builder.CreateOr(Val, Val1);
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_strexd) {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_strexd);
+ llvm::Type *STy = llvm::StructType::get(getLLVMContext(), Int32Ty, Int32Ty,
+ NULL);
+
+ Value *One = llvm::ConstantInt::get(Int32Ty, 1);
+ Value *Tmp = Builder.CreateAlloca(Int64Ty, One, "tmp");
+ Value *Val = EmitScalarExpr(E->getArg(0));
+ Builder.CreateStore(Val, Tmp);
+
+ Value *LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
+ Val = Builder.CreateLoad(LdPtr);
+
+ Value *Arg0 = Builder.CreateExtractValue(Val, 0);
+ Value *Arg1 = Builder.CreateExtractValue(Val, 1);
+ Value *StPtr = EmitScalarExpr(E->getArg(1));
+ return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "strexd");
+ }
+
llvm::SmallVector<Value*, 4> Ops;
for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
Ops.push_back(EmitScalarExpr(E->getArg(i)));
@@ -2143,16 +2177,21 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
- case X86::BI__builtin_ia32_loaddqu: {
- const llvm::Type *VecTy = ConvertType(E->getType());
- const llvm::Type *IntTy = llvm::IntegerType::get(getLLVMContext(), 128);
+ case X86::BI__builtin_ia32_movntps:
+ case X86::BI__builtin_ia32_movntpd:
+ case X86::BI__builtin_ia32_movntdq:
+ case X86::BI__builtin_ia32_movnti: {
+ llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(),
+ Builder.getInt32(1));
+ // Convert the type of the pointer to a pointer to the stored type.
Value *BC = Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(IntTy),
+ llvm::PointerType::getUnqual(Ops[1]->getType()),
"cast");
- LoadInst *LI = Builder.CreateLoad(BC);
- LI->setAlignment(1); // Unaligned load.
- return Builder.CreateBitCast(LI, VecTy, "loadu.cast");
+ StoreInst *SI = Builder.CreateStore(Ops[1], BC);
+ SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
+ SI->setAlignment(16);
+ return SI;
}
// 3DNow!
case X86::BI__builtin_ia32_pavgusb:
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index 184147c..f6fc202 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -28,17 +28,6 @@
using namespace clang;
using namespace CodeGen;
-/// Determines whether the given function has a trivial body that does
-/// not require any specific codegen.
-static bool HasTrivialBody(const FunctionDecl *FD) {
- Stmt *S = FD->getBody();
- if (!S)
- return true;
- if (isa<CompoundStmt>(S) && cast<CompoundStmt>(S)->body_empty())
- return true;
- return false;
-}
-
/// Try to emit a base destructor as an alias to its primary
/// base-class destructor.
bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
@@ -47,7 +36,7 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
// If the destructor doesn't have a trivial body, we have to emit it
// separately.
- if (!HasTrivialBody(D))
+ if (!D->hasTrivialBody())
return true;
const CXXRecordDecl *Class = D->getParent();
@@ -187,7 +176,10 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
void CodeGenModule::EmitCXXConstructors(const CXXConstructorDecl *D) {
// The constructor used for constructing this as a complete class;
// constucts the virtual bases, then calls the base constructor.
- EmitGlobal(GlobalDecl(D, Ctor_Complete));
+ if (!D->getParent()->isAbstract()) {
+ // We don't need to emit the complete ctor if the class is abstract.
+ EmitGlobal(GlobalDecl(D, Ctor_Complete));
+ }
// The constructor used for constructing this as a base class;
// ignores virtual bases.
@@ -244,7 +236,11 @@ void CodeGenModule::EmitCXXDestructors(const CXXDestructorDecl *D) {
// The destructor used for destructing this as a most-derived class;
// call the base destructor and then destructs any virtual bases.
- EmitGlobal(GlobalDecl(D, Dtor_Complete));
+ if (!D->getParent()->isAbstract() || D->isVirtual()) {
+ // We don't need to emit the complete ctor if the class is abstract,
+ // unless the destructor is virtual and needs to be in the vtable.
+ EmitGlobal(GlobalDecl(D, Dtor_Complete));
+ }
// The destructor used for destructing this as a base class; ignores
// virtual bases.
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index a765f0f..712ae89 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -309,10 +309,10 @@ CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
/***/
-void CodeGenTypes::GetExpandedTypes(QualType Ty,
- std::vector<const llvm::Type*> &ArgTys,
- bool IsRecursive) {
- const RecordType *RT = Ty->getAsStructureType();
+void CodeGenTypes::GetExpandedTypes(QualType type,
+ llvm::SmallVectorImpl<const llvm::Type*> &expandedTypes,
+ bool isRecursive) {
+ const RecordType *RT = type->getAsStructureType();
assert(RT && "Can only expand structure types.");
const RecordDecl *RD = RT->getDecl();
assert(!RD->hasFlexibleArrayMember() &&
@@ -324,11 +324,11 @@ void CodeGenTypes::GetExpandedTypes(QualType Ty,
assert(!FD->isBitField() &&
"Cannot expand structure with bit-field members.");
- QualType FT = FD->getType();
- if (CodeGenFunction::hasAggregateLLVMType(FT))
- GetExpandedTypes(FT, ArgTys, IsRecursive);
+ QualType fieldType = FD->getType();
+ if (fieldType->isRecordType())
+ GetExpandedTypes(fieldType, expandedTypes, isRecursive);
else
- ArgTys.push_back(ConvertType(FT, IsRecursive));
+ expandedTypes.push_back(ConvertType(fieldType, isRecursive));
}
}
@@ -513,6 +513,29 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
return CGF.Builder.CreateLoad(Tmp);
}
+// Function to store a first-class aggregate into memory. We prefer to
+// store the elements rather than the aggregate to be more friendly to
+// fast-isel.
+// FIXME: Do we need to recurse here?
+static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
+ llvm::Value *DestPtr, bool DestIsVolatile,
+ bool LowAlignment) {
+ // Prefer scalar stores to first-class aggregate stores.
+ if (const llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(Val->getType())) {
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
+ llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
+ llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
+ DestIsVolatile);
+ if (LowAlignment)
+ SI->setAlignment(1);
+ }
+ } else {
+ CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
+ }
+}
+
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
@@ -553,7 +576,7 @@ static void CreateCoercedStore(llvm::Value *Src,
llvm::Value *Casted =
CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
// FIXME: Use better alignment / avoid requiring aligned store.
- CGF.Builder.CreateStore(Src, Casted, DstIsVolatile)->setAlignment(1);
+ BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
} else {
// Otherwise do coercion through memory. This is stupid, but
// simple.
@@ -612,49 +635,49 @@ const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
}
const llvm::FunctionType *
-CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
- bool IsRecursive) {
- std::vector<const llvm::Type*> ArgTys;
+CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic,
+ bool isRecursive) {
+ llvm::SmallVector<const llvm::Type*, 8> argTypes;
+ const llvm::Type *resultType = 0;
- const llvm::Type *ResultType = 0;
-
- QualType RetTy = FI.getReturnType();
- const ABIArgInfo &RetAI = FI.getReturnInfo();
- switch (RetAI.getKind()) {
+ const ABIArgInfo &retAI = FI.getReturnInfo();
+ switch (retAI.getKind()) {
case ABIArgInfo::Expand:
- assert(0 && "Invalid ABI kind for return argument");
+ llvm_unreachable("Invalid ABI kind for return argument");
case ABIArgInfo::Extend:
case ABIArgInfo::Direct:
- ResultType = RetAI.getCoerceToType();
+ resultType = retAI.getCoerceToType();
break;
case ABIArgInfo::Indirect: {
- assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
- ResultType = llvm::Type::getVoidTy(getLLVMContext());
- const llvm::Type *STy = ConvertType(RetTy, IsRecursive);
- unsigned AS = Context.getTargetAddressSpace(RetTy);
- ArgTys.push_back(llvm::PointerType::get(STy, AS));
+ assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
+ resultType = llvm::Type::getVoidTy(getLLVMContext());
+
+ QualType ret = FI.getReturnType();
+ const llvm::Type *ty = ConvertType(ret, isRecursive);
+ unsigned addressSpace = Context.getTargetAddressSpace(ret);
+ argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
break;
}
case ABIArgInfo::Ignore:
- ResultType = llvm::Type::getVoidTy(getLLVMContext());
+ resultType = llvm::Type::getVoidTy(getLLVMContext());
break;
}
for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
ie = FI.arg_end(); it != ie; ++it) {
- const ABIArgInfo &AI = it->info;
+ const ABIArgInfo &argAI = it->info;
- switch (AI.getKind()) {
+ switch (argAI.getKind()) {
case ABIArgInfo::Ignore:
break;
case ABIArgInfo::Indirect: {
// indirect arguments are always on the stack, which is addr space #0.
- const llvm::Type *LTy = ConvertTypeForMem(it->type, IsRecursive);
- ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
+ const llvm::Type *LTy = ConvertTypeForMem(it->type, isRecursive);
+ argTypes.push_back(LTy->getPointerTo());
break;
}
@@ -663,23 +686,23 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
// If the coerce-to type is a first class aggregate, flatten it. Either
// way is semantically identical, but fast-isel and the optimizer
// generally likes scalar values better than FCAs.
- const llvm::Type *ArgTy = AI.getCoerceToType();
- if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgTy)) {
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
- ArgTys.push_back(STy->getElementType(i));
+ const llvm::Type *argType = argAI.getCoerceToType();
+ if (const llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
+ for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
+ argTypes.push_back(st->getElementType(i));
} else {
- ArgTys.push_back(ArgTy);
+ argTypes.push_back(argType);
}
break;
}
case ABIArgInfo::Expand:
- GetExpandedTypes(it->type, ArgTys, IsRecursive);
+ GetExpandedTypes(it->type, argTypes, isRecursive);
break;
}
}
- return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
+ return llvm::FunctionType::get(resultType, argTypes, isVariadic);
}
const llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
@@ -786,9 +809,9 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
// sense to do it here because parameters are so messed up.
switch (AI.getKind()) {
case ABIArgInfo::Extend:
- if (ParamType->isSignedIntegerType())
+ if (ParamType->isSignedIntegerOrEnumerationType())
Attributes |= llvm::Attribute::SExt;
- else if (ParamType->isUnsignedIntegerType())
+ else if (ParamType->isUnsignedIntegerOrEnumerationType())
Attributes |= llvm::Attribute::ZExt;
// FALL THROUGH
case ABIArgInfo::Direct:
@@ -822,12 +845,12 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
continue;
case ABIArgInfo::Expand: {
- std::vector<const llvm::Type*> Tys;
+ llvm::SmallVector<const llvm::Type*, 8> types;
// FIXME: This is rather inefficient. Do we ever actually need to do
// anything here? The result should be just reconstructed on the other
// side, so extension should be a non-issue.
- getTypes().GetExpandedTypes(ParamType, Tys, false);
- Index += Tys.size();
+ getTypes().GetExpandedTypes(ParamType, types, false);
+ Index += types.size();
continue;
}
}
@@ -1166,6 +1189,15 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
type);
+ if (hasAggregateLLVMType(type) && isa<ImplicitCastExpr>(E) &&
+ cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
+ LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
+ assert(L.isSimple());
+ args.add(RValue::getAggregate(L.getAddress(), L.isVolatileQualified()),
+ type, /*NeedsCopy*/true);
+ return;
+ }
+
args.add(EmitAnyExprToTemp(E), type);
}
@@ -1231,6 +1263,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Alignment, I->Ty);
else
StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
+ } else if (I->NeedsCopy && !ArgInfo.getIndirectByVal()) {
+ Args.push_back(CreateMemTemp(I->Ty));
+ EmitAggregateCopy(Args.back(), RV.getAggregateAddr(), I->Ty,
+ RV.isVolatileQualified());
} else {
Args.push_back(RV.getAggregateAddr());
}
@@ -1409,7 +1445,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
DestPtr = CreateMemTemp(RetTy, "agg.tmp");
DestIsVolatile = false;
}
- Builder.CreateStore(CI, DestPtr, DestIsVolatile);
+ BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
return RValue::getAggregate(DestPtr);
}
return RValue::get(CI);
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 3f600c0..160a62e 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -47,8 +47,9 @@ namespace CodeGen {
struct CallArg {
RValue RV;
QualType Ty;
- CallArg(RValue rv, QualType ty)
- : RV(rv), Ty(ty)
+ bool NeedsCopy;
+ CallArg(RValue rv, QualType ty, bool needscopy)
+ : RV(rv), Ty(ty), NeedsCopy(needscopy)
{ }
};
@@ -57,8 +58,8 @@ namespace CodeGen {
class CallArgList :
public llvm::SmallVector<CallArg, 16> {
public:
- void add(RValue rvalue, QualType type) {
- push_back(CallArg(rvalue, type));
+ void add(RValue rvalue, QualType type, bool needscopy = false) {
+ push_back(CallArg(rvalue, type, needscopy));
}
};
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index ca8b657..5725d80 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -520,6 +520,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
FunctionArgList &Args) {
assert(MemberInit->isAnyMemberInitializer() &&
"Must have member initializer!");
+ assert(MemberInit->getInit() && "Must have initializer!");
// non-static data member initializers.
FieldDecl *Field = MemberInit->getAnyMember();
@@ -726,12 +727,13 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
B != E; ++B) {
CXXCtorInitializer *Member = (*B);
- if (Member->isBaseInitializer())
+ if (Member->isBaseInitializer()) {
EmitBaseInitializer(*this, ClassDecl, Member, CtorType);
- else if (Member->isAnyMemberInitializer())
+ } else {
+ assert(Member->isAnyMemberInitializer() &&
+ "Delegating initializer on non-delegating constructor");
MemberInitializers.push_back(Member);
- else
- llvm_unreachable("Delegating initializer on non-delegating constructor");
+ }
}
InitializeVTablePointers(ClassDecl);
@@ -740,6 +742,94 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
EmitMemberInitializer(*this, ClassDecl, MemberInitializers[I], CD, Args);
}
+static bool
+FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field);
+
+static bool
+HasTrivialDestructorBody(ASTContext &Context,
+ const CXXRecordDecl *BaseClassDecl,
+ const CXXRecordDecl *MostDerivedClassDecl)
+{
+ // If the destructor is trivial we don't have to check anything else.
+ if (BaseClassDecl->hasTrivialDestructor())
+ return true;
+
+ if (!BaseClassDecl->getDestructor()->hasTrivialBody())
+ return false;
+
+ // Check fields.
+ for (CXXRecordDecl::field_iterator I = BaseClassDecl->field_begin(),
+ E = BaseClassDecl->field_end(); I != E; ++I) {
+ const FieldDecl *Field = *I;
+
+ if (!FieldHasTrivialDestructorBody(Context, Field))
+ return false;
+ }
+
+ // Check non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I =
+ BaseClassDecl->bases_begin(), E = BaseClassDecl->bases_end();
+ I != E; ++I) {
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *NonVirtualBase =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+ if (!HasTrivialDestructorBody(Context, NonVirtualBase,
+ MostDerivedClassDecl))
+ return false;
+ }
+
+ if (BaseClassDecl == MostDerivedClassDecl) {
+ // Check virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I =
+ BaseClassDecl->vbases_begin(), E = BaseClassDecl->vbases_end();
+ I != E; ++I) {
+ const CXXRecordDecl *VirtualBase =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+ if (!HasTrivialDestructorBody(Context, VirtualBase,
+ MostDerivedClassDecl))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool
+FieldHasTrivialDestructorBody(ASTContext &Context,
+ const FieldDecl *Field)
+{
+ QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());
+
+ const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
+ if (!RT)
+ return true;
+
+ CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
+}
+
+/// CanSkipVTablePointerInitialization - Check whether we need to initialize
+/// any vtable pointers before calling this destructor.
+static bool CanSkipVTablePointerInitialization(ASTContext &Context,
+ const CXXDestructorDecl *Dtor) {
+ if (!Dtor->hasTrivialBody())
+ return false;
+
+ // Check the fields.
+ const CXXRecordDecl *ClassDecl = Dtor->getParent();
+ for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
+ E = ClassDecl->field_end(); I != E; ++I) {
+ const FieldDecl *Field = *I;
+
+ if (!FieldHasTrivialDestructorBody(Context, Field))
+ return false;
+ }
+
+ return true;
+}
+
/// EmitDestructorBody - Emits the body of the current destructor.
void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
@@ -791,7 +881,8 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
EnterDtorCleanups(Dtor, Dtor_Base);
// Initialize the vtable pointers before entering the body.
- InitializeVTablePointers(Dtor->getParent());
+ if (!CanSkipVTablePointerInitialization(getContext(), Dtor))
+ InitializeVTablePointers(Dtor->getParent());
if (isTryBody)
EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
@@ -1269,6 +1360,23 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
ReturnValueSlot(), DelegateArgs, Ctor);
}
+namespace {
+ struct CallDelegatingCtorDtor : EHScopeStack::Cleanup {
+ const CXXDestructorDecl *Dtor;
+ llvm::Value *Addr;
+ CXXDtorType Type;
+
+ CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr,
+ CXXDtorType Type)
+ : Dtor(D), Addr(Addr), Type(Type) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
+ Addr);
+ }
+ };
+}
+
void
CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
const FunctionArgList &Args) {
@@ -1279,8 +1387,17 @@ CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor
AggValueSlot AggSlot = AggValueSlot::forAddr(ThisPtr, false, /*Lifetime*/ true);
EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);
-}
+ const CXXRecordDecl *ClassDecl = Ctor->getParent();
+ if (CGM.getLangOptions().Exceptions && !ClassDecl->hasTrivialDestructor()) {
+ CXXDtorType Type =
+ CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;
+
+ EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup,
+ ClassDecl->getDestructor(),
+ ThisPtr, Type);
+ }
+}
void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
CXXDtorType Type,
@@ -1494,3 +1611,136 @@ llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
return Builder.CreateLoad(VTablePtrSrc, "vtable");
}
+
+static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
+ const Expr *E = Base;
+
+ while (true) {
+ E = E->IgnoreParens();
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if (CE->getCastKind() == CK_DerivedToBase ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase ||
+ CE->getCastKind() == CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ }
+
+ break;
+ }
+
+ QualType DerivedType = E->getType();
+ if (const PointerType *PTy = DerivedType->getAs<PointerType>())
+ DerivedType = PTy->getPointeeType();
+
+ return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
+}
+
+// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
+// quite what we want.
+static const Expr *skipNoOpCastsAndParens(const Expr *E) {
+ while (true) {
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
+ E = PE->getSubExpr();
+ continue;
+ }
+
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if (CE->getCastKind() == CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ }
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ if (UO->getOpcode() == UO_Extension) {
+ E = UO->getSubExpr();
+ continue;
+ }
+ }
+ return E;
+ }
+}
+
+/// canDevirtualizeMemberFunctionCall - Checks whether the given virtual member
+/// function call on the given expr can be devirtualized, i.e. whether the
+/// callee can be resolved statically instead of through the vtable.
+static bool canDevirtualizeMemberFunctionCall(const Expr *Base,
+ const CXXMethodDecl *MD) {
+ // If the most derived class is marked final, we know that no subclass can
+ // override this member function and so we can devirtualize it. For example:
+ //
+ // struct A { virtual void f(); }
+ // struct B final : A { };
+ //
+ // void f(B *b) {
+ // b->f();
+ // }
+ //
+ const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
+ if (MostDerivedClassDecl->hasAttr<FinalAttr>())
+ return true;
+
+ // If the member function is marked 'final', we know that it can't be
+ // overridden and can therefore devirtualize it.
+ if (MD->hasAttr<FinalAttr>())
+ return true;
+
+ // Similarly, if the class itself is marked 'final' it can't be overridden
+ // and we can therefore devirtualize the member function call.
+ if (MD->getParent()->hasAttr<FinalAttr>())
+ return true;
+
+ Base = skipNoOpCastsAndParens(Base);
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+      // If the variable is of record type, its static type is its dynamic type.
+ return VD->getType()->isRecordType();
+ }
+
+ return false;
+ }
+
+ // We can always devirtualize calls on temporary object expressions.
+ if (isa<CXXConstructExpr>(Base))
+ return true;
+
+ // And calls on bound temporaries.
+ if (isa<CXXBindTemporaryExpr>(Base))
+ return true;
+
+ // Check if this is a call expr that returns a record type.
+ if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
+ return CE->getCallReturnType()->isRecordType();
+
+ // We can't devirtualize the call.
+ return false;
+}
+
+static bool UseVirtualCall(ASTContext &Context,
+ const CXXOperatorCallExpr *CE,
+ const CXXMethodDecl *MD) {
+ if (!MD->isVirtual())
+ return false;
+
+ // When building with -fapple-kext, all calls must go through the vtable since
+ // the kernel linker can do runtime patching of vtables.
+ if (Context.getLangOptions().AppleKext)
+ return true;
+
+ return !canDevirtualizeMemberFunctionCall(CE->getArg(0), MD);
+}
+
+llvm::Value *
+CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD,
+ llvm::Value *This) {
+ const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
+
+ if (UseVirtualCall(getContext(), E, MD))
+ return BuildVirtualCall(MD, This, Ty);
+
+ return CGM.GetAddrOfFunction(MD, Ty);
+}
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index f2e1c02..98d30db 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -335,10 +335,12 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
case BuiltinType::SChar: Encoding = llvm::dwarf::DW_ATE_signed_char; break;
case BuiltinType::UShort:
case BuiltinType::UInt:
+ case BuiltinType::UInt128:
case BuiltinType::ULong:
case BuiltinType::ULongLong: Encoding = llvm::dwarf::DW_ATE_unsigned; break;
case BuiltinType::Short:
case BuiltinType::Int:
+ case BuiltinType::Int128:
case BuiltinType::Long:
case BuiltinType::LongLong: Encoding = llvm::dwarf::DW_ATE_signed; break;
case BuiltinType::Bool: Encoding = llvm::dwarf::DW_ATE_boolean; break;
@@ -553,9 +555,12 @@ llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty,
// We don't set size information, but do specify where the typedef was
// declared.
unsigned Line = getLineNumber(Ty->getDecl()->getLocation());
- llvm::DIType DbgTy = DBuilder.createTypedef(Src, Ty->getDecl()->getName(),
- Unit, Line);
- return DbgTy;
+ const TypedefNameDecl *TyDecl = Ty->getDecl();
+ llvm::DIDescriptor TydefContext =
+ getContextDescriptor(cast<Decl>(Ty->getDecl()->getDeclContext()));
+
+ return
+ DBuilder.createTypedef(Src, TyDecl->getName(), Unit, Line, TydefContext);
}
llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
@@ -628,8 +633,7 @@ CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
FieldDecl *field = *I;
if (IsMsStruct) {
// Zero-length bitfields following non-bitfield members are ignored
- if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((field), LastFD) ||
- CGM.getContext().ZeroBitfieldFollowsBitfield((field), LastFD)) {
+ if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((field), LastFD)) {
--fieldNo;
continue;
}
@@ -1240,9 +1244,13 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
uint64_t Size = CGM.getContext().getTypeSize(Ty);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+ unsigned Flags = 0;
+ if (ID->getImplementation())
+ Flags |= llvm::DIDescriptor::FlagObjcClassComplete;
+
llvm::DIType RealDecl =
DBuilder.createStructType(Unit, ID->getName(), DefUnit,
- Line, Size, Align, 0,
+ Line, Size, Align, Flags,
Elements, RuntimeLang);
// Now that we have a real decl for the struct, replace anything using the
@@ -1439,6 +1447,9 @@ static QualType UnwrapTypeForDebugInfo(QualType T) {
case Type::Decltype:
T = cast<DecltypeType>(T)->getUnderlyingType();
break;
+ case Type::UnaryTransform:
+ T = cast<UnaryTransformType>(T)->getUnderlyingType();
+ break;
case Type::Attributed:
T = cast<AttributedType>(T)->getEquivalentType();
break;
@@ -1554,6 +1565,7 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
case Type::TypeOfExpr:
case Type::TypeOf:
case Type::Decltype:
+ case Type::UnaryTransform:
case Type::Auto:
llvm_unreachable("type should have been unwrapped!");
return llvm::DIType();
@@ -1612,6 +1624,33 @@ llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
return llvm::DISubprogram();
}
+// getOrCreateFunctionType - Construct a DIType for a function. For C++ methods
+// this includes the implicit "this"; for ObjC methods, "self" and "_cmd".
+llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl * D, QualType FnType,
+ llvm::DIFile F) {
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
+ return getOrCreateMethodType(Method, F);
+ else if (const ObjCMethodDecl *OMethod = dyn_cast<ObjCMethodDecl>(D)) {
+ // Add "self" and "_cmd"
+ llvm::SmallVector<llvm::Value *, 16> Elts;
+
+ // First element is always return type. For 'void' functions it is NULL.
+ Elts.push_back(getOrCreateType(OMethod->getResultType(), F));
+ // "self" pointer is always first argument.
+ Elts.push_back(getOrCreateType(OMethod->getSelfDecl()->getType(), F));
+ // "cmd" pointer is always second argument.
+ Elts.push_back(getOrCreateType(OMethod->getCmdDecl()->getType(), F));
+ // Get rest of the arguments.
+ for (ObjCMethodDecl::param_iterator PI = OMethod->param_begin(),
+ PE = OMethod->param_end(); PI != PE; ++PI)
+ Elts.push_back(getOrCreateType((*PI)->getType(), F));
+
+ llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(Elts);
+ return DBuilder.createSubroutineType(F, EltTypeArray);
+ }
+ return getOrCreateType(FnType, F);
+}
+
/// EmitFunctionStart - Constructs the debug code for entering a function -
/// "llvm.dbg.func.start.".
void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
@@ -1644,7 +1683,8 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
}
Name = getFunctionName(FD);
// Use mangled name as linkage name for c/c++ functions.
- LinkageName = CGM.getMangledName(GD);
+ if (!Fn->hasInternalLinkage())
+ LinkageName = CGM.getMangledName(GD);
if (LinkageName == Name)
LinkageName = llvm::StringRef();
if (FD->hasPrototype())
@@ -1652,6 +1692,9 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
if (const NamespaceDecl *NSDecl =
dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
FDContext = getOrCreateNameSpace(NSDecl);
+ else if (const RecordDecl *RDecl =
+ dyn_cast_or_null<RecordDecl>(FD->getDeclContext()))
+ FDContext = getContextDescriptor(cast<Decl>(RDecl->getDeclContext()));
// Collect template parameters.
TParamsArray = CollectFunctionTemplateParams(FD, Unit);
@@ -1672,11 +1715,10 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
unsigned LineNo = getLineNumber(CurLoc);
if (D->isImplicit())
Flags |= llvm::DIDescriptor::FlagArtificial;
- llvm::DIType SPTy = getOrCreateType(FnType, Unit);
llvm::DISubprogram SPDecl = getFunctionDeclaration(D);
llvm::DISubprogram SP =
DBuilder.createFunction(FDContext, Name, LinkageName, Unit,
- LineNo, SPTy,
+ LineNo, getOrCreateFunctionType(D, FnType, Unit),
Fn->hasInternalLinkage(), true/*definition*/,
Flags, CGM.getLangOptions().Optimize, Fn,
TParamsArray, SPDecl);
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index 27d991b..6ec6b65 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -98,6 +98,8 @@ class CGDebugInfo {
llvm::DIType CreateEnumType(const EnumDecl *ED);
llvm::DIType getOrCreateMethodType(const CXXMethodDecl *Method,
llvm::DIFile F);
+ llvm::DIType getOrCreateFunctionType(const Decl *D, QualType FnType,
+ llvm::DIFile F);
llvm::DIType getOrCreateVTablePtrType(llvm::DIFile F);
llvm::DINameSpace getOrCreateNameSpace(const NamespaceDecl *N);
llvm::DIType CreatePointeeType(QualType PointeeTy, llvm::DIFile F);
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index c027375..8a1a853 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -51,6 +51,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::ImplicitParam:
case Decl::ClassTemplate:
case Decl::FunctionTemplate:
+ case Decl::TypeAliasTemplate:
case Decl::TemplateTemplateParm:
case Decl::ObjCMethod:
case Decl::ObjCCategory:
@@ -628,15 +629,14 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
emission.Address = DeclPtr;
// Emit debug info for local var declaration.
- if (CGDebugInfo *DI = getDebugInfo()) {
- assert(HaveInsertPoint() && "Unexpected unreachable point!");
-
- DI->setLocation(D.getLocation());
- if (Target.useGlobalsForAutomaticVariables()) {
- DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
- } else
- DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
- }
+ if (HaveInsertPoint())
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(D.getLocation());
+ if (Target.useGlobalsForAutomaticVariables()) {
+ DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
+ } else
+ DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
+ }
return emission;
}
@@ -741,6 +741,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
llvm::GlobalValue::InternalLinkage,
constant, Name, 0, false, 0);
GV->setAlignment(alignment.getQuantity());
+ GV->setUnnamedAddr(true);
llvm::Value *SrcPtr = GV;
if (SrcPtr->getType() != BP)
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 45b0b96..178badd 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -117,19 +117,13 @@ CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
return;
}
- std::vector<const llvm::Type *> Params;
- Params.push_back(Int8PtrTy);
-
// Get the destructor function type
const llvm::Type *DtorFnTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
- Params, false);
+ Int8PtrTy, false);
DtorFnTy = llvm::PointerType::getUnqual(DtorFnTy);
- Params.clear();
- Params.push_back(DtorFnTy);
- Params.push_back(Int8PtrTy);
- Params.push_back(Int8PtrTy);
+ const llvm::Type *Params[] = { DtorFnTy, Int8PtrTy, Int8PtrTy };
// Get the __cxa_atexit function type
// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
@@ -248,6 +242,8 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
&CXXGlobalInits[0],
CXXGlobalInits.size());
AddGlobalCtor(Fn);
+ CXXGlobalInits.clear();
+ PrioritizedCXXGlobalInits.clear();
}
void CodeGenModule::EmitCXXGlobalDtorFunc() {
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 6cb9599..e8ad6da 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -112,11 +112,18 @@ static llvm::Constant *getUnexpectedFn(CodeGenFunction &CGF) {
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_call_unexpected");
}
+llvm::Constant *CodeGenFunction::getUnwindResumeFn() {
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(VoidTy, Int8PtrTy, /*IsVarArgs=*/false);
+
+ if (CGM.getLangOptions().SjLjExceptions)
+ return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume");
+ return CGM.CreateRuntimeFunction(FTy, "_Unwind_Resume");
+}
+
llvm::Constant *CodeGenFunction::getUnwindResumeOrRethrowFn() {
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), Int8PtrTy,
- /*IsVarArgs=*/false);
+ llvm::FunctionType::get(VoidTy, Int8PtrTy, /*IsVarArgs=*/false);
if (CGM.getLangOptions().SjLjExceptions)
return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume_or_Rethrow");
@@ -354,13 +361,17 @@ static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *e,
}
llvm::Value *CodeGenFunction::getExceptionSlot() {
- if (!ExceptionSlot) {
- const llvm::Type *i8p = llvm::Type::getInt8PtrTy(getLLVMContext());
- ExceptionSlot = CreateTempAlloca(i8p, "exn.slot");
- }
+ if (!ExceptionSlot)
+ ExceptionSlot = CreateTempAlloca(Int8PtrTy, "exn.slot");
return ExceptionSlot;
}
+llvm::Value *CodeGenFunction::getEHSelectorSlot() {
+ if (!EHSelectorSlot)
+ EHSelectorSlot = CreateTempAlloca(Int32Ty, "ehselector.slot");
+ return EHSelectorSlot;
+}
+
void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
if (!E->getSubExpr()) {
if (getInvokeDest()) {
@@ -563,47 +574,59 @@ llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
return LP;
}
+// This code contains a hack to work around a design flaw in
+// LLVM's EH IR which breaks semantics after inlining. This same
+// hack is implemented in llvm-gcc.
+//
+// The LLVM EH abstraction is basically a thin veneer over the
+// traditional GCC zero-cost design: for each range of instructions
+// in the function, there is (at most) one "landing pad" with an
+// associated chain of EH actions. A language-specific personality
+// function interprets this chain of actions and (1) decides whether
+// or not to resume execution at the landing pad and (2) if so,
+// provides an integer indicating why it's stopping. In LLVM IR,
+// the association of a landing pad with a range of instructions is
+// achieved via an invoke instruction, the chain of actions becomes
+// the arguments to the @llvm.eh.selector call, and the selector
+// call returns the integer indicator. Other than the required
+// presence of two intrinsic function calls in the landing pad,
+// the IR exactly describes the layout of the output code.
+//
+// A principal advantage of this design is that it is completely
+// language-agnostic; in theory, the LLVM optimizers can treat
+// landing pads neutrally, and targets need only know how to lower
+// the intrinsics to have a functioning exceptions system (assuming
+// that platform exceptions follow something approximately like the
+// GCC design). Unfortunately, landing pads cannot be combined in a
+// language-agnostic way: given selectors A and B, there is no way
+// to make a single landing pad which faithfully represents the
+// semantics of propagating an exception first through A, then
+// through B, without knowing how the personality will interpret the
+// (lowered form of the) selectors. This means that inlining has no
+// choice but to crudely chain invokes (i.e., to ignore invokes in
+// the inlined function, but to turn all unwindable calls into
+// invokes), which is only semantically valid if every unwind stops
+// at every landing pad.
+//
+// Therefore, the invoke-inline hack is to guarantee that every
+// landing pad has a catch-all.
+enum CleanupHackLevel_t {
+ /// A level of hack that requires that all landing pads have
+ /// catch-alls.
+ CHL_MandatoryCatchall,
+
+ /// A level of hack that requires that all landing pads handle
+ /// cleanups.
+ CHL_MandatoryCleanup,
+
+ /// No hacks at all; ideal IR generation.
+ CHL_Ideal
+};
+const CleanupHackLevel_t CleanupHackLevel = CHL_MandatoryCleanup;
+
llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
assert(EHStack.requiresLandingPad());
- // This function contains a hack to work around a design flaw in
- // LLVM's EH IR which breaks semantics after inlining. This same
- // hack is implemented in llvm-gcc.
- //
- // The LLVM EH abstraction is basically a thin veneer over the
- // traditional GCC zero-cost design: for each range of instructions
- // in the function, there is (at most) one "landing pad" with an
- // associated chain of EH actions. A language-specific personality
- // function interprets this chain of actions and (1) decides whether
- // or not to resume execution at the landing pad and (2) if so,
- // provides an integer indicating why it's stopping. In LLVM IR,
- // the association of a landing pad with a range of instructions is
- // achieved via an invoke instruction, the chain of actions becomes
- // the arguments to the @llvm.eh.selector call, and the selector
- // call returns the integer indicator. Other than the required
- // presence of two intrinsic function calls in the landing pad,
- // the IR exactly describes the layout of the output code.
- //
- // A principal advantage of this design is that it is completely
- // language-agnostic; in theory, the LLVM optimizers can treat
- // landing pads neutrally, and targets need only know how to lower
- // the intrinsics to have a functioning exceptions system (assuming
- // that platform exceptions follow something approximately like the
- // GCC design). Unfortunately, landing pads cannot be combined in a
- // language-agnostic way: given selectors A and B, there is no way
- // to make a single landing pad which faithfully represents the
- // semantics of propagating an exception first through A, then
- // through B, without knowing how the personality will interpret the
- // (lowered form of the) selectors. This means that inlining has no
- // choice but to crudely chain invokes (i.e., to ignore invokes in
- // the inlined function, but to turn all unwindable calls into
- // invokes), which is only semantically valid if every unwind stops
- // at every landing pad.
- //
- // Therefore, the invoke-inline hack is to guarantee that every
- // landing pad has a catch-all.
- const bool UseInvokeInlineHack = true;
-
for (EHScopeStack::iterator ir = EHStack.begin(); ; ) {
assert(ir != EHStack.end() &&
"stack requiring landing pad is nothing but non-EH scopes?");
@@ -736,16 +759,23 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
EHSelector.append(EHFilters.begin(), EHFilters.end());
// Also check whether we need a cleanup.
- if (UseInvokeInlineHack || HasEHCleanup)
- EHSelector.push_back(UseInvokeInlineHack
+ if (CleanupHackLevel == CHL_MandatoryCatchall || HasEHCleanup)
+ EHSelector.push_back(CleanupHackLevel == CHL_MandatoryCatchall
? getCatchAllValue(*this)
: getCleanupValue(*this));
// Otherwise, signal that we at least have cleanups.
- } else if (UseInvokeInlineHack || HasEHCleanup) {
- EHSelector.push_back(UseInvokeInlineHack
+ } else if (CleanupHackLevel == CHL_MandatoryCatchall || HasEHCleanup) {
+ EHSelector.push_back(CleanupHackLevel == CHL_MandatoryCatchall
? getCatchAllValue(*this)
: getCleanupValue(*this));
+
+ // At the MandatoryCleanup hack level, we don't need to actually
+ // spuriously tell the unwinder that we have cleanups, but we do
+ // need to always be prepared to handle cleanups.
+ } else if (CleanupHackLevel == CHL_MandatoryCleanup) {
+ // Just don't decrement LastToEmitInLoop.
+
} else {
assert(LastToEmitInLoop > 2);
LastToEmitInLoop--;
@@ -758,6 +788,10 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_selector),
EHSelector.begin(), EHSelector.end(), "eh.selector");
Selection->setDoesNotThrow();
+
+ // Save the selector value in mandatory-cleanup mode.
+ if (CleanupHackLevel == CHL_MandatoryCleanup)
+ Builder.CreateStore(Selection, getEHSelectorSlot());
// Select the right handler.
llvm::Value *llvm_eh_typeid_for =
@@ -833,22 +867,13 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// If there was a cleanup, we'll need to actually check whether we
// landed here because the filter triggered.
- if (UseInvokeInlineHack || HasEHCleanup) {
- llvm::BasicBlock *RethrowBB = createBasicBlock("cleanup");
+ if (CleanupHackLevel != CHL_Ideal || HasEHCleanup) {
llvm::BasicBlock *UnexpectedBB = createBasicBlock("ehspec.unexpected");
- llvm::Constant *Zero = llvm::ConstantInt::get(Builder.getInt32Ty(), 0);
+ llvm::Constant *Zero = llvm::ConstantInt::get(Int32Ty, 0);
llvm::Value *FailsFilter =
Builder.CreateICmpSLT(SavedSelection, Zero, "ehspec.fails");
- Builder.CreateCondBr(FailsFilter, UnexpectedBB, RethrowBB);
-
- // The rethrow block is where we land if this was a cleanup.
- // TODO: can this be _Unwind_Resume if the InvokeInlineHack is off?
- EmitBlock(RethrowBB);
- Builder.CreateCall(getUnwindResumeOrRethrowFn(),
- Builder.CreateLoad(getExceptionSlot()))
- ->setDoesNotReturn();
- Builder.CreateUnreachable();
+ Builder.CreateCondBr(FailsFilter, UnexpectedBB, getRethrowDest().getBlock());
EmitBlock(UnexpectedBB);
}
@@ -863,7 +888,7 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
Builder.CreateUnreachable();
// ...or a normal catch handler...
- } else if (!UseInvokeInlineHack && !HasEHCleanup) {
+ } else if (CleanupHackLevel == CHL_Ideal && !HasEHCleanup) {
llvm::Value *Type = EHSelector.back();
EmitBranchThroughEHCleanup(EHHandlers[Type]);
@@ -1440,14 +1465,39 @@ CodeGenFunction::UnwindDest CodeGenFunction::getRethrowDest() {
// This can always be a call because we necessarily didn't find
// anything on the EH stack which needs our help.
llvm::StringRef RethrowName = Personality.getCatchallRethrowFnName();
- llvm::Constant *RethrowFn;
- if (!RethrowName.empty())
- RethrowFn = getCatchallRethrowFn(*this, RethrowName);
- else
- RethrowFn = getUnwindResumeOrRethrowFn();
+ if (!RethrowName.empty()) {
+ Builder.CreateCall(getCatchallRethrowFn(*this, RethrowName),
+ Builder.CreateLoad(getExceptionSlot()))
+ ->setDoesNotReturn();
+ } else {
+ llvm::Value *Exn = Builder.CreateLoad(getExceptionSlot());
+
+ switch (CleanupHackLevel) {
+ case CHL_MandatoryCatchall:
+ // In mandatory-catchall mode, we need to use
+ // _Unwind_Resume_or_Rethrow, or whatever the personality's
+ // equivalent is.
+ Builder.CreateCall(getUnwindResumeOrRethrowFn(), Exn)
+ ->setDoesNotReturn();
+ break;
+ case CHL_MandatoryCleanup: {
+ // In mandatory-cleanup mode, we should use llvm.eh.resume.
+ llvm::Value *Selector = Builder.CreateLoad(getEHSelectorSlot());
+ Builder.CreateCall2(CGM.getIntrinsic(llvm::Intrinsic::eh_resume),
+ Exn, Selector)
+ ->setDoesNotReturn();
+ break;
+ }
+ case CHL_Ideal:
+ // In an idealized mode where we don't have to worry about the
+ // optimizer combining landing pads, we should just use
+ // _Unwind_Resume (or the personality's equivalent).
+ Builder.CreateCall(getUnwindResumeFn(), Exn)
+ ->setDoesNotReturn();
+ break;
+ }
+ }
- Builder.CreateCall(RethrowFn, Builder.CreateLoad(getExceptionSlot()))
- ->setDoesNotReturn();
Builder.CreateUnreachable();
Builder.restoreIP(SavedIP);
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index bc2cd35..2f6b55b 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -1390,7 +1390,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// The index must always be an integer, which is not an aggregate. Emit it.
llvm::Value *Idx = EmitScalarExpr(E->getIdx());
QualType IdxTy = E->getIdx()->getType();
- bool IdxSigned = IdxTy->isSignedIntegerType();
+ bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
// If the base is a vector type, then we are forming a vector element lvalue
// with this subscript.
@@ -1635,7 +1635,8 @@ LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue,
IndirectFieldDecl::chain_iterator I = Field->chain_begin(),
IEnd = Field->chain_end();
while (true) {
- LValue LV = EmitLValueForField(BaseValue, cast<FieldDecl>(*I), CVRQualifiers);
+ LValue LV = EmitLValueForField(BaseValue, cast<FieldDecl>(*I),
+ CVRQualifiers);
if (++I == IEnd) return LV;
assert(LV.isSimple());
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 29c7688..d8da642 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -642,7 +642,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (const RecordType *RT = CGF.getContext()
.getBaseElementType(ElementType)->getAs<RecordType>()) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- hasNonTrivialCXXConstructor = !RD->hasTrivialConstructor();
+ hasNonTrivialCXXConstructor = !RD->hasTrivialDefaultConstructor();
}
// FIXME: were we intentionally ignoring address spaces and GC attributes?
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index bdaa873..81fee67 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -345,18 +345,7 @@ CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
}
}
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- const llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
- FPT->isVariadic());
- llvm::Value *Callee;
- if (MD->isVirtual() &&
- !canDevirtualizeMemberFunctionCalls(getContext(),
- E->getArg(0), MD))
- Callee = BuildVirtualCall(MD, This, Ty);
- else
- Callee = CGM.GetAddrOfFunction(MD, Ty);
-
+ llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
E->arg_begin() + 1, E->arg_end());
}
@@ -403,18 +392,26 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
E->arg_begin(), E->arg_end());
}
else {
- CXXCtorType Type;
- CXXConstructExpr::ConstructionKind K = E->getConstructionKind();
- if (K == CXXConstructExpr::CK_Delegating) {
+ CXXCtorType Type = Ctor_Complete;
+ bool ForVirtualBase = false;
+
+ switch (E->getConstructionKind()) {
+ case CXXConstructExpr::CK_Delegating:
// We should be emitting a constructor; GlobalDecl will assert this
Type = CurGD.getCtorType();
- } else {
- Type = (E->getConstructionKind() == CXXConstructExpr::CK_Complete)
- ? Ctor_Complete : Ctor_Base;
- }
+ break;
+
+ case CXXConstructExpr::CK_Complete:
+ Type = Ctor_Complete;
+ break;
+
+ case CXXConstructExpr::CK_VirtualBase:
+ ForVirtualBase = true;
+ // fall-through
- bool ForVirtualBase =
- E->getConstructionKind() == CXXConstructExpr::CK_VirtualBase;
+ case CXXConstructExpr::CK_NonVirtualBase:
+ Type = Ctor_Base;
+ }
// Call the constructor.
EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
@@ -447,204 +444,256 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
E->arg_begin(), E->arg_end());
}
-/// Check whether the given operator new[] is the global placement
-/// operator new[].
-static bool IsPlacementOperatorNewArray(ASTContext &Ctx,
- const FunctionDecl *Fn) {
- // Must be in global scope. Note that allocation functions can't be
- // declared in namespaces.
- if (!Fn->getDeclContext()->getRedeclContext()->isFileContext())
- return false;
-
- // Signature must be void *operator new[](size_t, void*).
- // The size_t is common to all operator new[]s.
- if (Fn->getNumParams() != 2)
- return false;
-
- CanQualType ParamType = Ctx.getCanonicalType(Fn->getParamDecl(1)->getType());
- return (ParamType == Ctx.VoidPtrTy);
-}
-
static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
const CXXNewExpr *E) {
if (!E->isArray())
return CharUnits::Zero();
- // No cookie is required if the new operator being used is
- // ::operator new[](size_t, void*).
- const FunctionDecl *OperatorNew = E->getOperatorNew();
- if (IsPlacementOperatorNewArray(CGF.getContext(), OperatorNew))
+ // No cookie is required if the operator new[] being used is the
+ // reserved placement operator new[].
+ if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
return CharUnits::Zero();
return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}
-static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
- CodeGenFunction &CGF,
- const CXXNewExpr *E,
- llvm::Value *&NumElements,
- llvm::Value *&SizeWithoutCookie) {
- QualType ElemType = E->getAllocatedType();
-
- const llvm::IntegerType *SizeTy =
- cast<llvm::IntegerType>(CGF.ConvertType(CGF.getContext().getSizeType()));
-
- CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(ElemType);
-
- if (!E->isArray()) {
- SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
- return SizeWithoutCookie;
+static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
+ const CXXNewExpr *e,
+ llvm::Value *&numElements,
+ llvm::Value *&sizeWithoutCookie) {
+ QualType type = e->getAllocatedType();
+
+ if (!e->isArray()) {
+ CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
+ sizeWithoutCookie
+ = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
+ return sizeWithoutCookie;
}
+ // The width of size_t.
+ unsigned sizeWidth = CGF.SizeTy->getBitWidth();
+
// Figure out the cookie size.
- CharUnits CookieSize = CalculateCookiePadding(CGF, E);
+ llvm::APInt cookieSize(sizeWidth,
+ CalculateCookiePadding(CGF, e).getQuantity());
// Emit the array size expression.
// We multiply the size of all dimensions for NumElements.
// e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
- NumElements = CGF.EmitScalarExpr(E->getArraySize());
- assert(NumElements->getType() == SizeTy && "element count not a size_t");
-
- uint64_t ArraySizeMultiplier = 1;
+ numElements = CGF.EmitScalarExpr(e->getArraySize());
+ assert(isa<llvm::IntegerType>(numElements->getType()));
+
+ // The number of elements can have an arbitrary integer type;
+ // essentially, we need to multiply it by a constant factor, add a
+ // cookie size, and verify that the result is representable as a
+ // size_t. That's just a gloss, though, and it's wrong in one
+ // important way: if the count is negative, it's an error even if
+ // the cookie size would bring the total size >= 0.
+ bool isSigned
+ = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
+ const llvm::IntegerType *numElementsType
+ = cast<llvm::IntegerType>(numElements->getType());
+ unsigned numElementsWidth = numElementsType->getBitWidth();
+
+ // Compute the constant factor.
+ llvm::APInt arraySizeMultiplier(sizeWidth, 1);
while (const ConstantArrayType *CAT
- = CGF.getContext().getAsConstantArrayType(ElemType)) {
- ElemType = CAT->getElementType();
- ArraySizeMultiplier *= CAT->getSize().getZExtValue();
+ = CGF.getContext().getAsConstantArrayType(type)) {
+ type = CAT->getElementType();
+ arraySizeMultiplier *= CAT->getSize();
}
- llvm::Value *Size;
+ CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
+ llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
+ typeSizeMultiplier *= arraySizeMultiplier;
+
+ // This will be a size_t.
+ llvm::Value *size;
// If someone is doing 'new int[42]' there is no need to do a dynamic check.
// Don't bloat the -O0 code.
- if (llvm::ConstantInt *NumElementsC =
- dyn_cast<llvm::ConstantInt>(NumElements)) {
- llvm::APInt NEC = NumElementsC->getValue();
- unsigned SizeWidth = NEC.getBitWidth();
-
- // Determine if there is an overflow here by doing an extended multiply.
- NEC = NEC.zext(SizeWidth*2);
- llvm::APInt SC(SizeWidth*2, TypeSize.getQuantity());
- SC *= NEC;
-
- if (!CookieSize.isZero()) {
- // Save the current size without a cookie. We don't care if an
- // overflow's already happened because SizeWithoutCookie isn't
- // used if the allocator returns null or throws, as it should
- // always do on an overflow.
- llvm::APInt SWC = SC.trunc(SizeWidth);
- SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, SWC);
-
- // Add the cookie size.
- SC += llvm::APInt(SizeWidth*2, CookieSize.getQuantity());
+ if (llvm::ConstantInt *numElementsC =
+ dyn_cast<llvm::ConstantInt>(numElements)) {
+ const llvm::APInt &count = numElementsC->getValue();
+
+ bool hasAnyOverflow = false;
+
+ // If 'count' was a negative number, it's an overflow.
+ if (isSigned && count.isNegative())
+ hasAnyOverflow = true;
+
+ // We want to do all this arithmetic in size_t. If numElements is
+ // wider than that, check whether it's already too big, and if so,
+ // overflow.
+ else if (numElementsWidth > sizeWidth &&
+ numElementsWidth - sizeWidth > count.countLeadingZeros())
+ hasAnyOverflow = true;
+
+ // Okay, compute a count at the right width.
+ llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);
+
+ // Scale numElements by that. This might overflow, but we don't
+ // care because it only overflows if allocationSize does, too, and
+ // if that overflows then we shouldn't use this.
+ numElements = llvm::ConstantInt::get(CGF.SizeTy,
+ adjustedCount * arraySizeMultiplier);
+
+ // Compute the size before cookie, and track whether it overflowed.
+ bool overflow;
+ llvm::APInt allocationSize
+ = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
+ hasAnyOverflow |= overflow;
+
+ // Add in the cookie, and check whether it's overflowed.
+ if (cookieSize != 0) {
+ // Save the current size without a cookie. This shouldn't be
+ // used if there was overflow.
+ sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
+
+ allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
+ hasAnyOverflow |= overflow;
}
-
- if (SC.countLeadingZeros() >= SizeWidth) {
- SC = SC.trunc(SizeWidth);
- Size = llvm::ConstantInt::get(SizeTy, SC);
+
+ // On overflow, produce a -1 so operator new will fail.
+ if (hasAnyOverflow) {
+ size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
} else {
- // On overflow, produce a -1 so operator new throws.
- Size = llvm::Constant::getAllOnesValue(SizeTy);
+ size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
}
- // Scale NumElements while we're at it.
- uint64_t N = NEC.getZExtValue() * ArraySizeMultiplier;
- NumElements = llvm::ConstantInt::get(SizeTy, N);
-
- // Otherwise, we don't need to do an overflow-checked multiplication if
- // we're multiplying by one.
- } else if (TypeSize.isOne()) {
- assert(ArraySizeMultiplier == 1);
-
- Size = NumElements;
-
- // If we need a cookie, add its size in with an overflow check.
- // This is maybe a little paranoid.
- if (!CookieSize.isZero()) {
- SizeWithoutCookie = Size;
+ // Otherwise, we might need to use the overflow intrinsics.
+ } else {
+ // There are up to four conditions we need to test for:
+ // 1) if isSigned, we need to check whether numElements is negative;
+ // 2) if numElementsWidth > sizeWidth, we need to check whether
+ // numElements is larger than something representable in size_t;
+ // 3) we need to compute
+ // sizeWithoutCookie := numElements * typeSizeMultiplier
+ // and check whether it overflows; and
+ // 4) if we need a cookie, we need to compute
+ // size := sizeWithoutCookie + cookieSize
+ // and check whether it overflows.
+
+ llvm::Value *hasOverflow = 0;
+
+ // If numElementsWidth > sizeWidth, then one way or another, we're
+ // going to have to do a comparison for (2), and this happens to
+ // take care of (1), too.
+ if (numElementsWidth > sizeWidth) {
+ llvm::APInt threshold(numElementsWidth, 1);
+ threshold <<= sizeWidth;
+
+ llvm::Value *thresholdV
+ = llvm::ConstantInt::get(numElementsType, threshold);
+
+ hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
+ numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);
+
+ // Otherwise, if we're signed, we want to sext up to size_t.
+ } else if (isSigned) {
+ if (numElementsWidth < sizeWidth)
+ numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);
+
+ // If there's a non-1 type size multiplier, then we can do the
+ // signedness check at the same time as we do the multiply
+ // because a negative number times anything will cause an
+ // unsigned overflow. Otherwise, we have to do it here.
+ if (typeSizeMultiplier == 1)
+ hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
+ llvm::ConstantInt::get(CGF.SizeTy, 0));
+
+ // Otherwise, zext up to size_t if necessary.
+ } else if (numElementsWidth < sizeWidth) {
+ numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
+ }
- llvm::Value *CookieSizeV
- = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
+ assert(numElements->getType() == CGF.SizeTy);
- const llvm::Type *Types[] = { SizeTy };
- llvm::Value *UAddF
- = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
- llvm::Value *AddRes
- = CGF.Builder.CreateCall2(UAddF, Size, CookieSizeV);
+ size = numElements;
- Size = CGF.Builder.CreateExtractValue(AddRes, 0);
- llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
- Size = CGF.Builder.CreateSelect(DidOverflow,
- llvm::ConstantInt::get(SizeTy, -1),
- Size);
+ // Multiply by the type size if necessary. This multiplier
+ // includes all the factors for nested arrays.
+ //
+ // This step also causes numElements to be scaled up by the
+ // nested-array factor if necessary. Overflow on this computation
+ // can be ignored because the result shouldn't be used if
+ // allocation fails.
+ if (typeSizeMultiplier != 1) {
+ const llvm::Type *intrinsicTypes[] = { CGF.SizeTy };
+ llvm::Value *umul_with_overflow
+ = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow,
+ intrinsicTypes, 1);
+
+ llvm::Value *tsmV =
+ llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
+ llvm::Value *result =
+ CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);
+
+ llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
+ if (hasOverflow)
+ hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
+ else
+ hasOverflow = overflowed;
+
+ size = CGF.Builder.CreateExtractValue(result, 0);
+
+ // Also scale up numElements by the array size multiplier.
+ if (arraySizeMultiplier != 1) {
+ // If the base element type size is 1, then we can re-use the
+ // multiply we just did.
+ if (typeSize.isOne()) {
+ assert(arraySizeMultiplier == typeSizeMultiplier);
+ numElements = size;
+
+ // Otherwise we need a separate multiply.
+ } else {
+ llvm::Value *asmV =
+ llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
+ numElements = CGF.Builder.CreateMul(numElements, asmV);
+ }
+ }
+ } else {
+ // numElements doesn't need to be scaled.
+ assert(arraySizeMultiplier == 1);
}
+
+ // Add in the cookie size if necessary.
+ if (cookieSize != 0) {
+ sizeWithoutCookie = size;
+
+ const llvm::Type *intrinsicTypes[] = { CGF.SizeTy };
+ llvm::Value *uadd_with_overflow
+ = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow,
+ intrinsicTypes, 1);
+
+ llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
+ llvm::Value *result =
+ CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);
+
+ llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
+ if (hasOverflow)
+ hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
+ else
+ hasOverflow = overflowed;
- // Otherwise use the int.umul.with.overflow intrinsic.
- } else {
- llvm::Value *OutermostElementSize
- = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
-
- llvm::Value *NumOutermostElements = NumElements;
-
- // Scale NumElements by the array size multiplier. This might
- // overflow, but only if the multiplication below also overflows,
- // in which case this multiplication isn't used.
- if (ArraySizeMultiplier != 1)
- NumElements = CGF.Builder.CreateMul(NumElements,
- llvm::ConstantInt::get(SizeTy, ArraySizeMultiplier));
-
- // The requested size of the outermost array is non-constant.
- // Multiply that by the static size of the elements of that array;
- // on unsigned overflow, set the size to -1 to trigger an
- // exception from the allocation routine. This is sufficient to
- // prevent buffer overruns from the allocator returning a
- // seemingly valid pointer to insufficient space. This idea comes
- // originally from MSVC, and GCC has an open bug requesting
- // similar behavior:
- // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19351
- //
- // This will not be sufficient for C++0x, which requires a
- // specific exception class (std::bad_array_new_length).
- // That will require ABI support that has not yet been specified.
- const llvm::Type *Types[] = { SizeTy };
- llvm::Value *UMulF
- = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, Types, 1);
- llvm::Value *MulRes = CGF.Builder.CreateCall2(UMulF, NumOutermostElements,
- OutermostElementSize);
-
- // The overflow bit.
- llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(MulRes, 1);
-
- // The result of the multiplication.
- Size = CGF.Builder.CreateExtractValue(MulRes, 0);
-
- // If we have a cookie, we need to add that size in, too.
- if (!CookieSize.isZero()) {
- SizeWithoutCookie = Size;
-
- llvm::Value *CookieSizeV
- = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
- llvm::Value *UAddF
- = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
- llvm::Value *AddRes
- = CGF.Builder.CreateCall2(UAddF, SizeWithoutCookie, CookieSizeV);
-
- Size = CGF.Builder.CreateExtractValue(AddRes, 0);
-
- llvm::Value *AddDidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
- DidOverflow = CGF.Builder.CreateOr(DidOverflow, AddDidOverflow);
+ size = CGF.Builder.CreateExtractValue(result, 0);
}
- Size = CGF.Builder.CreateSelect(DidOverflow,
- llvm::ConstantInt::get(SizeTy, -1),
- Size);
+ // If we had any possibility of dynamic overflow, make a select to
+ // overwrite 'size' with an all-ones value, which should cause
+ // operator new to throw.
+ if (hasOverflow)
+ size = CGF.Builder.CreateSelect(hasOverflow,
+ llvm::Constant::getAllOnesValue(CGF.SizeTy),
+ size);
}
- if (CookieSize.isZero())
- SizeWithoutCookie = Size;
+ if (cookieSize == 0)
+ sizeWithoutCookie = size;
else
- assert(SizeWithoutCookie && "didn't set SizeWithoutCookie?");
+ assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");
- return Size;
+ return size;
}
static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
@@ -741,7 +790,7 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
if (E->isArray()) {
if (CXXConstructorDecl *Ctor = E->getConstructor()) {
bool RequiresZeroInitialization = false;
- if (Ctor->getParent()->hasTrivialConstructor()) {
+ if (Ctor->getParent()->hasTrivialDefaultConstructor()) {
// If new expression did not specify value-initialization, then there
// is no initialization.
if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
@@ -972,8 +1021,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::Value *numElements = 0;
llvm::Value *allocSizeWithoutCookie = 0;
llvm::Value *allocSize =
- EmitCXXNewAllocSize(getContext(), *this, E, numElements,
- allocSizeWithoutCookie);
+ EmitCXXNewAllocSize(*this, E, numElements, allocSizeWithoutCookie);
allocatorArgs.add(RValue::get(allocSize), sizeType);
@@ -1007,11 +1055,19 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
}
- // Emit the allocation call.
- RValue RV =
- EmitCall(CGM.getTypes().getFunctionInfo(allocatorArgs, allocatorType),
- CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
- allocatorArgs, allocator);
+ // Emit the allocation call. If the allocator is a global placement
+ // operator, just "inline" it directly.
+ RValue RV;
+ if (allocator->isReservedGlobalPlacementOperator()) {
+ assert(allocatorArgs.size() == 2);
+ RV = allocatorArgs[1].RV;
+ // TODO: kill any unnecessary computations done for the size
+ // argument.
+ } else {
+ RV = EmitCall(CGM.getTypes().getFunctionInfo(allocatorArgs, allocatorType),
+ CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
+ allocatorArgs, allocator);
+ }
// Emit a null check on the allocation result if the allocation
// function is allowed to return null (because it has a non-throwing
@@ -1056,7 +1112,8 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// If there's an operator delete, enter a cleanup to call it if an
// exception is thrown.
EHScopeStack::stable_iterator operatorDeleteCleanup;
- if (E->getOperatorDelete()) {
+ if (E->getOperatorDelete() &&
+ !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
operatorDeleteCleanup = EHStack.stable_begin();
}
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index 463b913..da37bd5 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -352,8 +352,7 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
if (IsMsStruct) {
// Zero-length bitfields following non-bitfield members are
// ignored:
- if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((*Field), LastFD) ||
- CGM.getContext().ZeroBitfieldFollowsBitfield((*Field), LastFD)) {
+ if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((*Field), LastFD)) {
--FieldNo;
continue;
}
@@ -611,12 +610,12 @@ public:
return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destType));
case CK_IntegralCast: {
- bool isSigned = subExpr->getType()->isSignedIntegerType();
+ bool isSigned = subExpr->getType()->isSignedIntegerOrEnumerationType();
return llvm::ConstantExpr::getIntegerCast(C, destType, isSigned);
}
case CK_IntegralToPointer: {
- bool isSigned = subExpr->getType()->isSignedIntegerType();
+ bool isSigned = subExpr->getType()->isSignedIntegerOrEnumerationType();
C = llvm::ConstantExpr::getIntegerCast(C, CGM.IntPtrTy, isSigned);
return llvm::ConstantExpr::getIntToPtr(C, destType);
}
@@ -626,13 +625,13 @@ public:
llvm::Constant::getNullValue(C->getType()));
case CK_IntegralToFloating:
- if (subExpr->getType()->isSignedIntegerType())
+ if (subExpr->getType()->isSignedIntegerOrEnumerationType())
return llvm::ConstantExpr::getSIToFP(C, destType);
else
return llvm::ConstantExpr::getUIToFP(C, destType);
case CK_FloatingToIntegral:
- if (E->getType()->isSignedIntegerType())
+ if (E->getType()->isSignedIntegerOrEnumerationType())
return llvm::ConstantExpr::getFPToSI(C, destType);
else
return llvm::ConstantExpr::getFPToUI(C, destType);
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 6bcc425..dff7bf4 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -400,7 +400,7 @@ public:
// Binary Operators.
Value *EmitMul(const BinOpInfo &Ops) {
- if (Ops.Ty->hasSignedIntegerRepresentation()) {
+ if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
case LangOptions::SOB_Undefined:
return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
@@ -508,6 +508,7 @@ public:
Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
return CGF.EmitObjCStringLiteral(E);
}
+ Value *VisitAsTypeExpr(AsTypeExpr *CE);
};
} // end anonymous namespace.
@@ -568,7 +569,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
// First, convert to the correct width so that we control the kind of
// extension.
const llvm::Type *MiddleTy = CGF.IntPtrTy;
- bool InputSigned = SrcType->isSignedIntegerType();
+ bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
llvm::Value* IntResult =
Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
// Then, cast to pointer.
@@ -610,7 +611,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
// Finally, we have the arithmetic types: real int/float.
if (isa<llvm::IntegerType>(Src->getType())) {
- bool InputSigned = SrcType->isSignedIntegerType();
+ bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
if (isa<llvm::IntegerType>(DstTy))
return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
else if (InputSigned)
@@ -621,7 +622,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
assert(Src->getType()->isFloatingPointTy() && "Unknown real conversion");
if (isa<llvm::IntegerType>(DstTy)) {
- if (DstType->isSignedIntegerType())
+ if (DstType->isSignedIntegerOrEnumerationType())
return Builder.CreateFPToSI(Src, DstTy, "conv");
else
return Builder.CreateFPToUI(Src, DstTy, "conv");
@@ -758,19 +759,13 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
// Handle vec3 special since the index will be off by one for the RHS.
+ const llvm::VectorType *VTy = cast<llvm::VectorType>(V1->getType());
llvm::SmallVector<llvm::Constant*, 32> indices;
for (unsigned i = 2; i < E->getNumSubExprs(); i++) {
- llvm::Constant *C = cast<llvm::Constant>(CGF.EmitScalarExpr(E->getExpr(i)));
- const llvm::VectorType *VTy = cast<llvm::VectorType>(V1->getType());
- if (VTy->getNumElements() == 3) {
- if (llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C)) {
- uint64_t cVal = CI->getZExtValue();
- if (cVal > 3) {
- C = llvm::ConstantInt::get(C->getType(), cVal-1);
- }
- }
- }
- indices.push_back(C);
+ unsigned Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
+ if (VTy->getNumElements() == 3 && Idx > 3)
+ Idx -= 1;
+ indices.push_back(Builder.getInt32(Idx));
}
Value *SV = llvm::ConstantVector::get(indices);
@@ -813,7 +808,7 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
// integer value.
Value *Base = Visit(E->getBase());
Value *Idx = Visit(E->getIdx());
- bool IdxSigned = E->getIdx()->getType()->isSignedIntegerType();
+ bool IdxSigned = E->getIdx()->getType()->isSignedIntegerOrEnumerationType();
Idx = Builder.CreateIntCast(Idx, CGF.Int32Ty, IdxSigned, "vecidxcast");
return Builder.CreateExtractElement(Base, Idx, "vecext");
}
@@ -1142,7 +1137,7 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
// First, convert to the correct width so that we control the kind of
// extension.
const llvm::Type *MiddleTy = CGF.IntPtrTy;
- bool InputSigned = E->getType()->isSignedIntegerType();
+ bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
llvm::Value* IntResult =
Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
@@ -1285,7 +1280,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// Note that signed integer inc/dec with width less than int can't
// overflow because of promotion rules; we're just eliding a few steps here.
- if (type->isSignedIntegerType() &&
+ if (type->isSignedIntegerOrEnumerationType() &&
value->getType()->getPrimitiveSizeInBits() >=
CGF.CGM.IntTy->getBitWidth())
value = EmitAddConsiderOverflowBehavior(E, value, amt, isInc);
@@ -1333,10 +1328,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (type->hasIntegerRepresentation()) {
llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
- if (type->hasSignedIntegerRepresentation())
- value = EmitAddConsiderOverflowBehavior(E, value, amt, isInc);
- else
- value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
+ value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
} else {
value = Builder.CreateFAdd(
value,
@@ -1447,7 +1439,7 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
// Compute the index
Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
- bool IdxSigned = IdxExpr->getType()->isSignedIntegerType();
+ bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
// Save the element type
@@ -1797,9 +1789,7 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
// Get the overflow handler.
const llvm::Type *Int8Ty = llvm::Type::getInt8Ty(VMContext);
- std::vector<const llvm::Type*> argTypes;
- argTypes.push_back(CGF.Int64Ty); argTypes.push_back(CGF.Int64Ty);
- argTypes.push_back(Int8Ty); argTypes.push_back(Int8Ty);
+ const llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
llvm::FunctionType *handlerTy =
llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
llvm::Value *handler = CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
@@ -1829,7 +1819,7 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
if (!Ops.Ty->isAnyPointerType()) {
- if (Ops.Ty->hasSignedIntegerRepresentation()) {
+ if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
case LangOptions::SOB_Undefined:
return Builder.CreateNSWAdd(Ops.LHS, Ops.RHS, "add");
@@ -1879,7 +1869,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
// Zero or sign extend the pointer value based on whether the index is
// signed or not.
const llvm::Type *IdxType = CGF.IntPtrTy;
- if (IdxExp->getType()->isSignedIntegerType())
+ if (IdxExp->getType()->isSignedIntegerOrEnumerationType())
Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
else
Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
@@ -1914,7 +1904,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
if (!isa<llvm::PointerType>(Ops.LHS->getType())) {
- if (Ops.Ty->hasSignedIntegerRepresentation()) {
+ if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
case LangOptions::SOB_Undefined:
return Builder.CreateNSWSub(Ops.LHS, Ops.RHS, "sub");
@@ -1954,7 +1944,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
// Zero or sign extend the pointer value based on whether the index is
// signed or not.
const llvm::Type *IdxType = CGF.IntPtrTy;
- if (BinOp->getRHS()->getType()->isSignedIntegerType())
+ if (BinOp->getRHS()->getType()->isSignedIntegerOrEnumerationType())
Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
else
Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
@@ -2556,6 +2546,56 @@ Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
return CGF.EmitBlockLiteral(block);
}
+Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
+ Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
+ const llvm::Type * DstTy = ConvertType(E->getDstType());
+
+ // Going from vec4->vec3 or vec3->vec4 is a special case and requires
+ // a shuffle vector instead of a bitcast.
+ const llvm::Type *SrcTy = Src->getType();
+ if (isa<llvm::VectorType>(DstTy) && isa<llvm::VectorType>(SrcTy)) {
+ unsigned numElementsDst = cast<llvm::VectorType>(DstTy)->getNumElements();
+ unsigned numElementsSrc = cast<llvm::VectorType>(SrcTy)->getNumElements();
+ if ((numElementsDst == 3 && numElementsSrc == 4)
+ || (numElementsDst == 4 && numElementsSrc == 3)) {
+
+
+ // In the case of going from int4->float3, a bitcast is needed before
+ // doing a shuffle.
+ const llvm::Type *srcElemTy =
+ cast<llvm::VectorType>(SrcTy)->getElementType();
+ const llvm::Type *dstElemTy =
+ cast<llvm::VectorType>(DstTy)->getElementType();
+
+ if ((srcElemTy->isIntegerTy() && dstElemTy->isFloatTy())
+ || (srcElemTy->isFloatTy() && dstElemTy->isIntegerTy())) {
+ // Create a vector with the destination's element type and the
+ // source's element count, so the bitcast below is legal.
+ const llvm::VectorType *newSrcTy = llvm::VectorType::get(dstElemTy,
+ numElementsSrc);
+
+ Src = Builder.CreateBitCast(Src, newSrcTy, "astypeCast");
+ }
+
+ llvm::Value *UnV = llvm::UndefValue::get(Src->getType());
+
+ llvm::SmallVector<llvm::Constant*, 3> Args;
+ Args.push_back(Builder.getInt32(0));
+ Args.push_back(Builder.getInt32(1));
+ Args.push_back(Builder.getInt32(2));
+
+ if (numElementsDst == 4)
+ Args.push_back(llvm::UndefValue::get(
+ llvm::Type::getInt32Ty(CGF.getLLVMContext())));
+
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
+
+ return Builder.CreateShuffleVector(Src, UnV, Mask, "astype");
+ }
+ }
+
+ return Builder.CreateBitCast(Src, DstTy, "astype");
+}
+
//===----------------------------------------------------------------------===//
// Entry Point into this File
//===----------------------------------------------------------------------===//
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index 5b0d41e..fa42cd1 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -47,6 +47,23 @@ llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
return CGM.getObjCRuntime().GenerateProtocolRef(Builder, E->getProtocol());
}
+/// \brief Adjust the type of the result of an Objective-C message send
+/// expression when the method has a related result type.
+static RValue AdjustRelatedResultType(CodeGenFunction &CGF,
+ const Expr *E,
+ const ObjCMethodDecl *Method,
+ RValue Result) {
+ if (!Method)
+ return Result;
+ if (!Method->hasRelatedResultType() ||
+ CGF.getContext().hasSameType(E->getType(), Method->getResultType()) ||
+ !Result.isScalar())
+ return Result;
+
+ // We have applied a related result type. Cast the rvalue appropriately.
+ return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(),
+ CGF.ConvertType(E->getType())));
+}
RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
ReturnValueSlot Return) {
@@ -59,15 +76,17 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
bool isClassMessage = false;
ObjCInterfaceDecl *OID = 0;
// Find the receiver
+ QualType ReceiverType;
llvm::Value *Receiver = 0;
switch (E->getReceiverKind()) {
case ObjCMessageExpr::Instance:
Receiver = EmitScalarExpr(E->getInstanceReceiver());
+ ReceiverType = E->getInstanceReceiver()->getType();
break;
case ObjCMessageExpr::Class: {
- const ObjCObjectType *ObjTy
- = E->getClassReceiver()->getAs<ObjCObjectType>();
+ ReceiverType = E->getClassReceiver();
+ const ObjCObjectType *ObjTy = ReceiverType->getAs<ObjCObjectType>();
assert(ObjTy && "Invalid Objective-C class message send");
OID = ObjTy->getInterface();
assert(OID && "Invalid Objective-C class message send");
@@ -77,11 +96,13 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
}
case ObjCMessageExpr::SuperInstance:
+ ReceiverType = E->getSuperType();
Receiver = LoadObjCSelf();
isSuperMessage = true;
break;
case ObjCMessageExpr::SuperClass:
+ ReceiverType = E->getSuperType();
Receiver = LoadObjCSelf();
isSuperMessage = true;
isClassMessage = true;
@@ -94,31 +115,35 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
QualType ResultType =
E->getMethodDecl() ? E->getMethodDecl()->getResultType() : E->getType();
+ RValue result;
if (isSuperMessage) {
// super is only valid in an Objective-C method
const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
- return Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
- E->getSelector(),
- OMD->getClassInterface(),
- isCategoryImpl,
- Receiver,
- isClassMessage,
- Args,
- E->getMethodDecl());
+ result = Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
+ E->getSelector(),
+ OMD->getClassInterface(),
+ isCategoryImpl,
+ Receiver,
+ isClassMessage,
+ Args,
+ E->getMethodDecl());
+ } else {
+ result = Runtime.GenerateMessageSend(*this, Return, ResultType,
+ E->getSelector(),
+ Receiver, Args, OID,
+ E->getMethodDecl());
}
-
- return Runtime.GenerateMessageSend(*this, Return, ResultType,
- E->getSelector(),
- Receiver, Args, OID,
- E->getMethodDecl());
+
+ return AdjustRelatedResultType(*this, E, E->getMethodDecl(), result);
}
/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
/// the LLVM function and sets the other context used by
/// CodeGenFunction.
void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
- const ObjCContainerDecl *CD) {
+ const ObjCContainerDecl *CD,
+ SourceLocation StartLoc) {
FunctionArgList args;
// Check if we should generate debug info for this method.
if (CGM.getModuleDebugInfo() && !OMD->hasAttr<NoDebugAttr>())
@@ -138,7 +163,7 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
CurGD = OMD;
- StartFunction(OMD, OMD->getResultType(), Fn, FI, args, OMD->getLocStart());
+ StartFunction(OMD, OMD->getResultType(), Fn, FI, args, StartLoc);
}
void CodeGenFunction::GenerateObjCGetterBody(ObjCIvarDecl *Ivar,
@@ -151,16 +176,14 @@ void CodeGenFunction::GenerateObjCGetterBody(ObjCIvarDecl *Ivar,
// objc_copyStruct (ReturnValue, &structIvar,
// sizeof (Type of Ivar), isAtomic, false);
CallArgList Args;
- RValue RV = RValue::get(Builder.CreateBitCast(ReturnValue,
- Types.ConvertType(getContext().VoidPtrTy)));
+ RValue RV = RValue::get(Builder.CreateBitCast(ReturnValue, VoidPtrTy));
Args.add(RV, getContext().VoidPtrTy);
- RV = RValue::get(Builder.CreateBitCast(LV.getAddress(),
- Types.ConvertType(getContext().VoidPtrTy)));
+ RV = RValue::get(Builder.CreateBitCast(LV.getAddress(), VoidPtrTy));
Args.add(RV, getContext().VoidPtrTy);
// sizeof (Type of Ivar)
CharUnits Size = getContext().getTypeSizeInChars(Ivar->getType());
llvm::Value *SizeVal =
- llvm::ConstantInt::get(Types.ConvertType(getContext().LongTy),
+ llvm::ConstantInt::get(Types.ConvertType(getContext().LongTy),
Size.getQuantity());
Args.add(RValue::get(SizeVal), getContext().LongTy);
llvm::Value *isAtomic =
@@ -179,7 +202,7 @@ void CodeGenFunction::GenerateObjCGetterBody(ObjCIvarDecl *Ivar,
/// Generate an Objective-C method. An Objective-C method is a C function with
/// its pointer, name, and types registered in the class struture.
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
- StartObjCMethod(OMD, OMD->getClassInterface());
+ StartObjCMethod(OMD, OMD->getClassInterface(), OMD->getLocStart());
EmitStmt(OMD->getBody());
FinishFunction(OMD->getBodyRBrace());
}
@@ -199,7 +222,7 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic);
ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
assert(OMD && "Invalid call to generate getter (empty method)");
- StartObjCMethod(OMD, IMP->getClassInterface());
+ StartObjCMethod(OMD, IMP->getClassInterface(), PID->getLocStart());
// Determine if we should use an objc_getProperty call for
// this. Non-atomic properties are directly evaluated.
@@ -292,7 +315,7 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
const CXXRecordDecl *classDecl = IVART->getAsCXXRecordDecl();
if (PID->getGetterCXXConstructor() &&
- classDecl && !classDecl->hasTrivialConstructor()) {
+ classDecl && !classDecl->hasTrivialDefaultConstructor()) {
ReturnStmt *Stmt =
new (getContext()) ReturnStmt(SourceLocation(),
PID->getGetterCXXConstructor(),
@@ -398,7 +421,7 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
assert(OMD && "Invalid call to generate setter (empty method)");
- StartObjCMethod(OMD, IMP->getClassInterface());
+ StartObjCMethod(OMD, IMP->getClassInterface(), PID->getLocStart());
const llvm::Triple &Triple = getContext().Target.getTriple();
QualType IVART = Ivar->getType();
bool IsCopy = PD->getSetterKind() == ObjCPropertyDecl::Copy;
@@ -496,7 +519,7 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
}
else {
// FIXME: Find a clean way to avoid AST node creation.
- SourceLocation Loc = PD->getLocation();
+ SourceLocation Loc = PID->getLocStart();
ValueDecl *Self = OMD->getSelfDecl();
ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
DeclRefExpr Base(Self, Self->getType(), VK_RValue, Loc);
@@ -618,7 +641,7 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
ObjCMethodDecl *MD,
bool ctor) {
MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
- StartObjCMethod(MD, IMP->getClassInterface());
+ StartObjCMethod(MD, IMP->getClassInterface(), MD->getLocStart());
// Emit .cxx_construct.
if (ctor) {
@@ -712,26 +735,31 @@ RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
const ObjCPropertyRefExpr *E = LV.getPropertyRefExpr();
QualType ResultType = E->getGetterResultType();
Selector S;
+ const ObjCMethodDecl *method;
if (E->isExplicitProperty()) {
const ObjCPropertyDecl *Property = E->getExplicitProperty();
S = Property->getGetterName();
+ method = Property->getGetterMethodDecl();
} else {
- const ObjCMethodDecl *Getter = E->getImplicitPropertyGetter();
- S = Getter->getSelector();
+ method = E->getImplicitPropertyGetter();
+ S = method->getSelector();
}
llvm::Value *Receiver = LV.getPropertyRefBaseAddr();
// Accesses to 'super' follow a different code path.
if (E->isSuperReceiver())
- return GenerateMessageSendSuper(*this, Return, ResultType,
- S, Receiver, CallArgList());
-
+ return AdjustRelatedResultType(*this, E, method,
+ GenerateMessageSendSuper(*this, Return,
+ ResultType,
+ S, Receiver,
+ CallArgList()));
const ObjCInterfaceDecl *ReceiverClass
= (E->isClassReceiver() ? E->getClassReceiver() : 0);
- return CGM.getObjCRuntime().
- GenerateMessageSend(*this, Return, ResultType, S,
- Receiver, CallArgList(), ReceiverClass);
+ return AdjustRelatedResultType(*this, E, method,
+ CGM.getObjCRuntime().
+ GenerateMessageSend(*this, Return, ResultType, S,
+ Receiver, CallArgList(), ReceiverClass));
}
void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index c4dc4c4..f0993c5 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -52,7 +52,7 @@ class LazyRuntimeFunction {
CodeGenModule *CGM;
std::vector<const llvm::Type*> ArgTys;
const char *FunctionName;
- llvm::Function *Function;
+ llvm::Constant *Function;
public:
/// Constructor leaves this class uninitialized, because it is intended to
/// be used as a field in another class and not all of the types that are
@@ -78,7 +78,7 @@ class LazyRuntimeFunction {
}
/// Overloaded cast operator, allows the class to be implicitly cast to an
/// LLVM constant.
- operator llvm::Function*() {
+ operator llvm::Constant*() {
if (!Function) {
if (0 == FunctionName) return 0;
// We put the return type on the end of the vector, so pop it back off
@@ -86,13 +86,17 @@ class LazyRuntimeFunction {
ArgTys.pop_back();
llvm::FunctionType *FTy = llvm::FunctionType::get(RetTy, ArgTys, false);
Function =
- cast<llvm::Function>(CGM->CreateRuntimeFunction(FTy, FunctionName));
+ cast<llvm::Constant>(CGM->CreateRuntimeFunction(FTy, FunctionName));
// We won't need to use the types again, so we may as well clean up the
// vector now
ArgTys.resize(0);
}
return Function;
}
+ operator llvm::Function*() {
+ return cast<llvm::Function>((llvm::Constant*)*this);
+ }
+
};
@@ -314,7 +318,7 @@ private:
/// The version of the runtime that this class targets. Must match the
/// version in the runtime.
- const int RuntimeVersion;
+ int RuntimeVersion;
/// The version of the protocol class. Used to differentiate between ObjC1
/// and ObjC2 protocols. Objective-C 1 protocols can not contain optional
/// components and can not contain declared properties. We always emit
@@ -444,10 +448,10 @@ public:
const ObjCProtocolDecl *PD);
virtual void GenerateProtocol(const ObjCProtocolDecl *PD);
virtual llvm::Function *ModuleInitFunction();
- virtual llvm::Function *GetPropertyGetFunction();
- virtual llvm::Function *GetPropertySetFunction();
- virtual llvm::Function *GetSetStructFunction();
- virtual llvm::Function *GetGetStructFunction();
+ virtual llvm::Constant *GetPropertyGetFunction();
+ virtual llvm::Constant *GetPropertySetFunction();
+ virtual llvm::Constant *GetSetStructFunction();
+ virtual llvm::Constant *GetGetStructFunction();
virtual llvm::Constant *EnumerationMutationFunction();
virtual void EmitTryStmt(CodeGenFunction &CGF,
@@ -484,6 +488,10 @@ public:
const CGBlockInfo &blockInfo) {
return NULLPtr;
}
+
+ virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name) {
+ return 0;
+ }
};
/// Class representing the legacy GCC Objective-C ABI. This is the default when
/// -fobjc-nonfragile-abi is not specified.
@@ -654,7 +662,6 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
: CGM(cgm), TheModule(CGM.getModule()), VMContext(cgm.getLLVMContext()),
ClassPtrAlias(0), MetaClassPtrAlias(0), RuntimeVersion(runtimeABIVersion),
ProtocolVersion(protocolClassVersion) {
-
msgSendMDKind = VMContext.getMDKindID("GNUObjCMessageSend");
@@ -729,14 +736,16 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
PtrDiffTy, BoolTy, BoolTy, NULL);
// IMP type
- std::vector<const llvm::Type*> IMPArgs;
- IMPArgs.push_back(IdTy);
- IMPArgs.push_back(SelectorTy);
+ const llvm::Type *IMPArgs[] = { IdTy, SelectorTy };
IMPTy = llvm::PointerType::getUnqual(llvm::FunctionType::get(IdTy, IMPArgs,
true));
// Don't bother initialising the GC stuff unless we're compiling in GC mode
if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
+ // This is a bit of an hack. We should sort this out by having a proper
+ // CGObjCGNUstep subclass for GC, but we may want to really support the old
+ // ABI and GC added in ObjectiveC2.framework, so we fudge it a bit for now
+ RuntimeVersion = 10;
// Get selectors needed in GC mode
RetainSel = GetNullarySelector("retain", CGM.getContext());
ReleaseSel = GetNullarySelector("release", CGM.getContext());
@@ -775,11 +784,8 @@ llvm::Value *CGObjCGNU::GetClass(CGBuilderTy &Builder,
EmitClassRef(OID->getNameAsString());
ClassName = Builder.CreateStructGEP(ClassName, 0);
- std::vector<const llvm::Type*> Params(1, PtrToInt8Ty);
llvm::Constant *ClassLookupFn =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy,
- Params,
- true),
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, PtrToInt8Ty, true),
"objc_lookup_class");
return Builder.CreateCall(ClassLookupFn, ClassName);
}
@@ -945,16 +951,17 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
bool IsClassMessage,
const CallArgList &CallArgs,
const ObjCMethodDecl *Method) {
- if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
+ CGBuilderTy &Builder = CGF.Builder;
+ if (CGM.getLangOptions().getGCMode() == LangOptions::GCOnly) {
if (Sel == RetainSel || Sel == AutoreleaseSel) {
- return RValue::get(Receiver);
+ return RValue::get(EnforceType(Builder, Receiver,
+ CGM.getTypes().ConvertType(ResultType)));
}
if (Sel == ReleaseSel) {
return RValue::get(0);
}
}
- CGBuilderTy &Builder = CGF.Builder;
llvm::Value *cmd = GetSelector(Builder, Sel);
@@ -971,14 +978,12 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
llvm::Value *ReceiverClass = 0;
if (isCategoryImpl) {
llvm::Constant *classLookupFunction = 0;
- std::vector<const llvm::Type*> Params;
- Params.push_back(PtrTy);
if (IsClassMessage) {
classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- IdTy, Params, true), "objc_get_meta_class");
+ IdTy, PtrTy, true), "objc_get_meta_class");
} else {
classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- IdTy, Params, true), "objc_get_class");
+ IdTy, PtrTy, true), "objc_get_class");
}
ReceiverClass = Builder.CreateCall(classLookupFunction,
MakeConstantString(Class->getNameAsString()));
@@ -1052,18 +1057,19 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
const CallArgList &CallArgs,
const ObjCInterfaceDecl *Class,
const ObjCMethodDecl *Method) {
+ CGBuilderTy &Builder = CGF.Builder;
+
// Strip out message sends to retain / release in GC mode
- if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
+ if (CGM.getLangOptions().getGCMode() == LangOptions::GCOnly) {
if (Sel == RetainSel || Sel == AutoreleaseSel) {
- return RValue::get(Receiver);
+ return RValue::get(EnforceType(Builder, Receiver,
+ CGM.getTypes().ConvertType(ResultType)));
}
if (Sel == ReleaseSel) {
return RValue::get(0);
}
}
- CGBuilderTy &Builder = CGF.Builder;
-
// If the return type is something that goes in an integer register, the
// runtime will handle 0 returns. For other cases, we fill in the 0 value
// ourselves.
@@ -2127,7 +2133,9 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// The symbol table is contained in a module which has some version-checking
// constants
llvm::StructType * ModuleTy = llvm::StructType::get(VMContext, LongTy, LongTy,
- PtrToInt8Ty, llvm::PointerType::getUnqual(SymTabTy), NULL);
+ PtrToInt8Ty, llvm::PointerType::getUnqual(SymTabTy),
+ (CGM.getLangOptions().getGCMode() == LangOptions::NonGC) ? NULL : IntTy,
+ NULL);
Elements.clear();
// Runtime version, used for ABI compatibility checking.
Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion));
@@ -2144,8 +2152,17 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
std::string path =
std::string(mainFile->getDir()->getName()) + '/' + mainFile->getName();
Elements.push_back(MakeConstantString(path, ".objc_source_file_name"));
-
Elements.push_back(SymTab);
+
+ switch (CGM.getLangOptions().getGCMode()) {
+ case LangOptions::GCOnly:
+ Elements.push_back(llvm::ConstantInt::get(IntTy, 2));
+ case LangOptions::NonGC:
+ break;
+ case LangOptions::HybridGC:
+ Elements.push_back(llvm::ConstantInt::get(IntTy, 1));
+ }
+
llvm::Value *Module = MakeGlobal(ModuleTy, Elements);
// Create the load function calling the runtime entry point with the module
@@ -2159,10 +2176,10 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
CGBuilderTy Builder(VMContext);
Builder.SetInsertPoint(EntryBB);
- std::vector<const llvm::Type*> Params(1,
- llvm::PointerType::getUnqual(ModuleTy));
- llvm::Value *Register = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- llvm::Type::getVoidTy(VMContext), Params, true), "__objc_exec_class");
+ llvm::FunctionType *FT =
+ llvm::FunctionType::get(Builder.getVoidTy(),
+ llvm::PointerType::getUnqual(ModuleTy), true);
+ llvm::Value *Register = CGM.CreateRuntimeFunction(FT, "__objc_exec_class");
Builder.CreateCall(Register, Module);
Builder.CreateRetVoid();
@@ -2192,18 +2209,18 @@ llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
return Method;
}
-llvm::Function *CGObjCGNU::GetPropertyGetFunction() {
+llvm::Constant *CGObjCGNU::GetPropertyGetFunction() {
return GetPropertyFn;
}
-llvm::Function *CGObjCGNU::GetPropertySetFunction() {
+llvm::Constant *CGObjCGNU::GetPropertySetFunction() {
return SetPropertyFn;
}
-llvm::Function *CGObjCGNU::GetGetStructFunction() {
+llvm::Constant *CGObjCGNU::GetGetStructFunction() {
return GetStructPropertyFn;
}
-llvm::Function *CGObjCGNU::GetSetStructFunction() {
+llvm::Constant *CGObjCGNU::GetSetStructFunction() {
return SetStructPropertyFn;
}
@@ -2273,7 +2290,7 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGenFunction &CGF,
llvm::Value *AddrWeakObj) {
CGBuilderTy B = CGF.Builder;
- AddrWeakObj = EnforceType(B, AddrWeakObj, IdTy);
+ AddrWeakObj = EnforceType(B, AddrWeakObj, PtrToIdTy);
return B.CreateCall(WeakReadFn, AddrWeakObj);
}
@@ -2303,7 +2320,7 @@ void CGObjCGNU::EmitObjCIvarAssign(CodeGenFunction &CGF,
llvm::Value *ivarOffset) {
CGBuilderTy B = CGF.Builder;
src = EnforceType(B, src, IdTy);
- dst = EnforceType(B, dst, PtrToIdTy);
+ dst = EnforceType(B, dst, IdTy);
B.CreateCall3(IvarAssignFn, src, dst, ivarOffset);
}
@@ -2320,8 +2337,8 @@ void CGObjCGNU::EmitGCMemmoveCollectable(CodeGenFunction &CGF,
llvm::Value *SrcPtr,
llvm::Value *Size) {
CGBuilderTy B = CGF.Builder;
- DestPtr = EnforceType(B, DestPtr, IdTy);
- SrcPtr = EnforceType(B, SrcPtr, PtrToIdTy);
+ DestPtr = EnforceType(B, DestPtr, PtrTy);
+ SrcPtr = EnforceType(B, SrcPtr, PtrTy);
B.CreateCall3(MemMoveFn, DestPtr, SrcPtr, Size);
}
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 2b1cfe3..8c3e9a3 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -43,18 +43,6 @@ using namespace clang;
using namespace CodeGen;
-static void EmitNullReturnInitialization(CodeGenFunction &CGF,
- ReturnValueSlot &returnSlot,
- QualType resultType) {
- // Force the return slot to exist.
- if (!returnSlot.getValue())
- returnSlot = ReturnValueSlot(CGF.CreateMemTemp(resultType), false);
- CGF.EmitNullInitialization(returnSlot.getValue(), resultType);
-}
-
-
-///
-
namespace {
typedef std::vector<llvm::Constant*> ConstantVector;
@@ -67,89 +55,89 @@ protected:
llvm::LLVMContext &VMContext;
private:
+ // The types of these functions don't really matter because we
+ // should always bitcast before calling them.
+
+ /// id objc_msgSend (id, SEL, ...)
+ ///
+ /// The default messenger, used for sends whose ABI is unchanged from
+ /// the all-integer/pointer case.
llvm::Constant *getMessageSendFn() const {
- // id objc_msgSend (id, SEL, ...)
- std::vector<const llvm::Type*> Params;
- Params.push_back(ObjectPtrTy);
- Params.push_back(SelectorPtrTy);
- return
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
- Params, true),
- "objc_msgSend");
+ const llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSend");
}
+ /// void objc_msgSend_stret (id, SEL, ...)
+ ///
+ /// The messenger used when the return value is an aggregate returned
+ /// by indirect reference in the first argument, and therefore the
+ /// self and selector parameters are shifted over by one.
llvm::Constant *getMessageSendStretFn() const {
- // id objc_msgSend_stret (id, SEL, ...)
- std::vector<const llvm::Type*> Params;
- Params.push_back(ObjectPtrTy);
- Params.push_back(SelectorPtrTy);
- return
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- Params, true),
- "objc_msgSend_stret");
+ const llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.VoidTy,
+ params, true),
+ "objc_msgSend_stret");
}
+ /// [double | long double] objc_msgSend_fpret(id self, SEL op, ...)
+ ///
+ /// The messenger used when the return value is returned on the x87
+ /// floating-point stack; without a special entrypoint, the nil case
+ /// would be unbalanced.
llvm::Constant *getMessageSendFpretFn() const {
- // FIXME: This should be long double on x86_64?
- // [double | long double] objc_msgSend_fpret(id self, SEL op, ...)
- std::vector<const llvm::Type*> Params;
- Params.push_back(ObjectPtrTy);
- Params.push_back(SelectorPtrTy);
- return
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ const llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(
llvm::Type::getDoubleTy(VMContext),
- Params,
- true),
- "objc_msgSend_fpret");
+ params, true),
+ "objc_msgSend_fpret");
}
+ /// id objc_msgSendSuper(struct objc_super *super, SEL op, ...)
+ ///
+ /// The messenger used for super calls, which have different dispatch
+ /// semantics. The class passed is the superclass of the current
+ /// class.
llvm::Constant *getMessageSendSuperFn() const {
- // id objc_msgSendSuper(struct objc_super *super, SEL op, ...)
- const char *SuperName = "objc_msgSendSuper";
- std::vector<const llvm::Type*> Params;
- Params.push_back(SuperPtrTy);
- Params.push_back(SelectorPtrTy);
+ const llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
- Params, true),
- SuperName);
+ params, true),
+ "objc_msgSendSuper");
}
+ /// id objc_msgSendSuper2(struct objc_super *super, SEL op, ...)
+ ///
+ /// A slightly different messenger used for super calls. The class
+ /// passed is the current class.
llvm::Constant *getMessageSendSuperFn2() const {
- // id objc_msgSendSuper2(struct objc_super *super, SEL op, ...)
- const char *SuperName = "objc_msgSendSuper2";
- std::vector<const llvm::Type*> Params;
- Params.push_back(SuperPtrTy);
- Params.push_back(SelectorPtrTy);
+ const llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
- Params, true),
- SuperName);
+ params, true),
+ "objc_msgSendSuper2");
}
+ /// void objc_msgSendSuper_stret(void *stretAddr, struct objc_super *super,
+ /// SEL op, ...)
+ ///
+ /// The messenger used for super calls which return an aggregate indirectly.
llvm::Constant *getMessageSendSuperStretFn() const {
- // void objc_msgSendSuper_stret(void * stretAddr, struct objc_super *super,
- // SEL op, ...)
- std::vector<const llvm::Type*> Params;
- Params.push_back(Int8PtrTy);
- Params.push_back(SuperPtrTy);
- Params.push_back(SelectorPtrTy);
+ const llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- Params, true),
+ llvm::FunctionType::get(CGM.VoidTy, params, true),
"objc_msgSendSuper_stret");
}
+ /// void objc_msgSendSuper2_stret(void * stretAddr, struct objc_super *super,
+ /// SEL op, ...)
+ ///
+ /// objc_msgSendSuper_stret with the super2 semantics.
llvm::Constant *getMessageSendSuperStretFn2() const {
- // void objc_msgSendSuper2_stret(void * stretAddr, struct objc_super *super,
- // SEL op, ...)
- std::vector<const llvm::Type*> Params;
- Params.push_back(Int8PtrTy);
- Params.push_back(SuperPtrTy);
- Params.push_back(SelectorPtrTy);
+ const llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- Params, true),
+ llvm::FunctionType::get(CGM.VoidTy, params, true),
"objc_msgSendSuper2_stret");
}
@@ -282,107 +270,97 @@ public:
/// GcReadWeakFn -- LLVM objc_read_weak (id *src) function.
llvm::Constant *getGcReadWeakFn() {
// id objc_read_weak (id *)
- std::vector<const llvm::Type*> Args;
- Args.push_back(ObjectPtrTy->getPointerTo());
+ const llvm::Type *args[] = { ObjectPtrTy->getPointerTo() };
llvm::FunctionType *FTy =
- llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_read_weak");
}
/// GcAssignWeakFn -- LLVM objc_assign_weak function.
llvm::Constant *getGcAssignWeakFn() {
// id objc_assign_weak (id, id *)
- std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
- Args.push_back(ObjectPtrTy->getPointerTo());
+ const llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
llvm::FunctionType *FTy =
- llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");
}
/// GcAssignGlobalFn -- LLVM objc_assign_global function.
llvm::Constant *getGcAssignGlobalFn() {
// id objc_assign_global(id, id *)
- std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
- Args.push_back(ObjectPtrTy->getPointerTo());
+ const llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
llvm::FunctionType *FTy =
- llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
}
/// GcAssignThreadLocalFn -- LLVM objc_assign_threadlocal function.
llvm::Constant *getGcAssignThreadLocalFn() {
// id objc_assign_threadlocal(id src, id * dest)
- std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
- Args.push_back(ObjectPtrTy->getPointerTo());
+ const llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
llvm::FunctionType *FTy =
- llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_threadlocal");
}
/// GcAssignIvarFn -- LLVM objc_assign_ivar function.
llvm::Constant *getGcAssignIvarFn() {
// id objc_assign_ivar(id, id *, ptrdiff_t)
- std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
- Args.push_back(ObjectPtrTy->getPointerTo());
- Args.push_back(LongTy);
+ const llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo(),
+ CGM.PtrDiffTy };
llvm::FunctionType *FTy =
- llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
}
/// GcMemmoveCollectableFn -- LLVM objc_memmove_collectable function.
llvm::Constant *GcMemmoveCollectableFn() {
// void *objc_memmove_collectable(void *dst, const void *src, size_t size)
- std::vector<const llvm::Type*> Args(1, Int8PtrTy);
- Args.push_back(Int8PtrTy);
- Args.push_back(LongTy);
- llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
+ const llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, LongTy };
+ llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_memmove_collectable");
}
/// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function.
llvm::Constant *getGcAssignStrongCastFn() {
// id objc_assign_strongCast(id, id *)
- std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
- Args.push_back(ObjectPtrTy->getPointerTo());
+ const llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
llvm::FunctionType *FTy =
- llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast");
}
/// ExceptionThrowFn - LLVM objc_exception_throw function.
llvm::Constant *getExceptionThrowFn() {
// void objc_exception_throw(id)
- std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ const llvm::Type *args[] = { ObjectPtrTy };
llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ llvm::FunctionType::get(CGM.VoidTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
}
/// ExceptionRethrowFn - LLVM objc_exception_rethrow function.
llvm::Constant *getExceptionRethrowFn() {
// void objc_exception_rethrow(void)
- std::vector<const llvm::Type*> Args;
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
return CGM.CreateRuntimeFunction(FTy, "objc_exception_rethrow");
}
/// SyncEnterFn - LLVM object_sync_enter function.
llvm::Constant *getSyncEnterFn() {
// void objc_sync_enter (id)
- std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ const llvm::Type *args[] = { ObjectPtrTy };
llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ llvm::FunctionType::get(CGM.VoidTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
}
/// SyncExitFn - LLVM object_sync_exit function.
llvm::Constant *getSyncExitFn() {
// void objc_sync_exit (id)
- std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ const llvm::Type *args[] = { ObjectPtrTy };
llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ llvm::FunctionType::get(CGM.VoidTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
}
@@ -474,55 +452,44 @@ public:
/// ExceptionTryEnterFn - LLVM objc_exception_try_enter function.
llvm::Constant *getExceptionTryEnterFn() {
- std::vector<const llvm::Type*> Params;
- Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy));
+ const llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
return CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- Params, false),
+ llvm::FunctionType::get(CGM.VoidTy, params, false),
"objc_exception_try_enter");
}
/// ExceptionTryExitFn - LLVM objc_exception_try_exit function.
llvm::Constant *getExceptionTryExitFn() {
- std::vector<const llvm::Type*> Params;
- Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy));
+ const llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
return CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- Params, false),
+ llvm::FunctionType::get(CGM.VoidTy, params, false),
"objc_exception_try_exit");
}
/// ExceptionExtractFn - LLVM objc_exception_extract function.
llvm::Constant *getExceptionExtractFn() {
- std::vector<const llvm::Type*> Params;
- Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy));
+ const llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
- Params, false),
+ params, false),
"objc_exception_extract");
-
}
/// ExceptionMatchFn - LLVM objc_exception_match function.
llvm::Constant *getExceptionMatchFn() {
- std::vector<const llvm::Type*> Params;
- Params.push_back(ClassPtrTy);
- Params.push_back(ObjectPtrTy);
+ const llvm::Type *params[] = { ClassPtrTy, ObjectPtrTy };
return CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext),
- Params, false),
+ llvm::FunctionType::get(CGM.Int32Ty, params, false),
"objc_exception_match");
}
/// SetJmpFn - LLVM _setjmp function.
llvm::Constant *getSetJmpFn() {
- std::vector<const llvm::Type*> Params;
- Params.push_back(llvm::Type::getInt32PtrTy(VMContext));
- return
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext),
- Params, false),
- "_setjmp");
-
+ // This is specifically the prototype for x86.
+ const llvm::Type *params[] = { CGM.Int32Ty->getPointerTo() };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty,
+ params, false),
+ "_setjmp");
}
public:
@@ -608,68 +575,56 @@ public:
llvm::Constant *getMessageSendFixupFn() {
// id objc_msgSend_fixup(id, struct message_ref_t*, ...)
- std::vector<const llvm::Type*> Params;
- Params.push_back(ObjectPtrTy);
- Params.push_back(MessageRefPtrTy);
+ const llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
- Params, true),
+ params, true),
"objc_msgSend_fixup");
}
llvm::Constant *getMessageSendFpretFixupFn() {
// id objc_msgSend_fpret_fixup(id, struct message_ref_t*, ...)
- std::vector<const llvm::Type*> Params;
- Params.push_back(ObjectPtrTy);
- Params.push_back(MessageRefPtrTy);
+ const llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
- Params, true),
+ params, true),
"objc_msgSend_fpret_fixup");
}
llvm::Constant *getMessageSendStretFixupFn() {
// id objc_msgSend_stret_fixup(id, struct message_ref_t*, ...)
- std::vector<const llvm::Type*> Params;
- Params.push_back(ObjectPtrTy);
- Params.push_back(MessageRefPtrTy);
+ const llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
- Params, true),
+ params, true),
"objc_msgSend_stret_fixup");
}
llvm::Constant *getMessageSendSuper2FixupFn() {
// id objc_msgSendSuper2_fixup (struct objc_super *,
// struct _super_message_ref_t*, ...)
- std::vector<const llvm::Type*> Params;
- Params.push_back(SuperPtrTy);
- Params.push_back(SuperMessageRefPtrTy);
+ const llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
- Params, true),
+ params, true),
"objc_msgSendSuper2_fixup");
}
llvm::Constant *getMessageSendSuper2StretFixupFn() {
// id objc_msgSendSuper2_stret_fixup(struct objc_super *,
// struct _super_message_ref_t*, ...)
- std::vector<const llvm::Type*> Params;
- Params.push_back(SuperPtrTy);
- Params.push_back(SuperMessageRefPtrTy);
+ const llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
- Params, true),
+ params, true),
"objc_msgSendSuper2_stret_fixup");
}
llvm::Constant *getObjCEndCatchFn() {
- return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- false),
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.VoidTy, false),
"objc_end_catch");
}
llvm::Constant *getObjCBeginCatchFn() {
- std::vector<const llvm::Type*> Params;
- Params.push_back(Int8PtrTy);
+ const llvm::Type *params[] = { Int8PtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(Int8PtrTy,
- Params, false),
+ params, false),
"objc_begin_catch");
}
@@ -865,16 +820,16 @@ protected:
unsigned Align,
bool AddToUsed);
- CodeGen::RValue EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
- ReturnValueSlot Return,
- QualType ResultType,
- llvm::Value *Sel,
- llvm::Value *Arg0,
- QualType Arg0Ty,
- bool IsSuper,
- const CallArgList &CallArgs,
- const ObjCMethodDecl *OMD,
- const ObjCCommonTypesHelper &ObjCTypes);
+ CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ llvm::Value *Sel,
+ llvm::Value *Arg0,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *OMD,
+ const ObjCCommonTypesHelper &ObjCTypes);
/// EmitImageInfo - Emit the image info marker used to encode some module
/// level information.
@@ -1093,6 +1048,13 @@ public:
virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
+
+ /// GetClassGlobal - Return the global variable for the Objective-C
+ /// class of the given name.
+ virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name) {
+ assert(false && "CGObjCMac::GetClassGlobal");
+ return 0;
+ }
};
class CGObjCNonFragileABIMac : public CGObjCCommonMac {
@@ -1110,16 +1072,16 @@ private:
/// EHTypeReferences - uniqued class ehtype references.
llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> EHTypeReferences;
- /// NonLegacyDispatchMethods - List of methods for which we do *not* generate
- /// legacy messaging dispatch.
- llvm::DenseSet<Selector> NonLegacyDispatchMethods;
+ /// VTableDispatchMethods - List of methods for which we generate
+ /// vtable-based message dispatch.
+ llvm::DenseSet<Selector> VTableDispatchMethods;
/// DefinedMetaClasses - List of defined meta-classes.
std::vector<llvm::GlobalValue*> DefinedMetaClasses;
- /// LegacyDispatchedSelector - Returns true if SEL is not in the list of
- /// NonLegacyDispatchMethods; false otherwise.
- bool LegacyDispatchedSelector(Selector Sel);
+ /// isVTableDispatchedSelector - Returns true if SEL is a
+ /// vtable-based selector.
+ bool isVTableDispatchedSelector(Selector Sel);
/// FinishNonFragileABIModule - Write out global data structures at the end of
/// processing a translation unit.
@@ -1178,20 +1140,20 @@ private:
ObjCProtocolDecl::protocol_iterator begin,
ObjCProtocolDecl::protocol_iterator end);
- CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
- ReturnValueSlot Return,
- QualType ResultType,
- Selector Sel,
- llvm::Value *Receiver,
- QualType Arg0Ty,
- bool IsSuper,
- const CallArgList &CallArgs,
- const ObjCMethodDecl *Method);
-
+ CodeGen::RValue EmitVTableMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+
/// GetClassGlobal - Return the global variable for the Objective-C
/// class of the given name.
llvm::GlobalVariable *GetClassGlobal(const std::string &Name);
-
+
/// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
/// for the given class reference.
llvm::Value *EmitClassRef(CGBuilderTy &Builder,
@@ -1347,6 +1309,46 @@ public:
const ObjCIvarDecl *Ivar);
};
+/// A helper class for performing the null-initialization of a return
+/// value.
+struct NullReturnState {
+ llvm::BasicBlock *NullBB;
+
+ NullReturnState() : NullBB(0) {}
+
+ void init(CodeGenFunction &CGF, llvm::Value *receiver) {
+ // Make blocks for the null-init and call edges.
+ NullBB = CGF.createBasicBlock("msgSend.nullinit");
+ llvm::BasicBlock *callBB = CGF.createBasicBlock("msgSend.call");
+
+ // Check for a null receiver and, if there is one, jump to the
+ // null-init test.
+ llvm::Value *isNull = CGF.Builder.CreateIsNull(receiver);
+ CGF.Builder.CreateCondBr(isNull, NullBB, callBB);
+
+ // Otherwise, start performing the call.
+ CGF.EmitBlock(callBB);
+ }
+
+ RValue complete(CodeGenFunction &CGF, RValue result, QualType resultType) {
+ if (!NullBB) return result;
+
+ // Finish the call path.
+ llvm::BasicBlock *contBB = CGF.createBasicBlock("msgSend.cont");
+ if (CGF.HaveInsertPoint()) CGF.Builder.CreateBr(contBB);
+
+ // Emit the null-init block and perform the null-initialization there.
+ CGF.EmitBlock(NullBB);
+ assert(result.isAggregate() && "null init of non-aggregate result?");
+ CGF.EmitNullInitialization(result.getAggregateAddr(), resultType);
+
+ // Jump to the continuation block.
+ CGF.EmitBlock(contBB);
+
+ return result;
+ }
+};
+
} // end anonymous namespace
/* *** Helper Functions *** */
@@ -1487,10 +1489,10 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
Target = CGF.Builder.CreateBitCast(Target, ClassTy);
CGF.Builder.CreateStore(Target,
CGF.Builder.CreateStructGEP(ObjCSuper, 1));
- return EmitLegacyMessageSend(CGF, Return, ResultType,
- EmitSelector(CGF.Builder, Sel),
- ObjCSuper, ObjCTypes.SuperPtrCTy,
- true, CallArgs, Method, ObjCTypes);
+ return EmitMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ ObjCSuper, ObjCTypes.SuperPtrCTy,
+ true, CallArgs, Method, ObjCTypes);
}
/// Generate code for a message send expression.
@@ -1502,23 +1504,23 @@ CodeGen::RValue CGObjCMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
const CallArgList &CallArgs,
const ObjCInterfaceDecl *Class,
const ObjCMethodDecl *Method) {
- return EmitLegacyMessageSend(CGF, Return, ResultType,
- EmitSelector(CGF.Builder, Sel),
- Receiver, CGF.getContext().getObjCIdType(),
- false, CallArgs, Method, ObjCTypes);
+ return EmitMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs, Method, ObjCTypes);
}
CodeGen::RValue
-CGObjCCommonMac::EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
- ReturnValueSlot Return,
- QualType ResultType,
- llvm::Value *Sel,
- llvm::Value *Arg0,
- QualType Arg0Ty,
- bool IsSuper,
- const CallArgList &CallArgs,
- const ObjCMethodDecl *Method,
- const ObjCCommonTypesHelper &ObjCTypes) {
+CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ llvm::Value *Sel,
+ llvm::Value *Arg0,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method,
+ const ObjCCommonTypesHelper &ObjCTypes) {
CallArgList ActualArgs;
if (!IsSuper)
Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy, "tmp");
@@ -1537,9 +1539,11 @@ CGObjCCommonMac::EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
CGM.getContext().getCanonicalType(ResultType) &&
"Result type mismatch!");
+ NullReturnState nullReturn;
+
llvm::Constant *Fn = NULL;
if (CGM.ReturnTypeUsesSRet(FnInfo)) {
- EmitNullReturnInitialization(CGF, Return, ResultType);
+ if (!IsSuper) nullReturn.init(CGF, Arg0);
Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper)
: ObjCTypes.getSendStretFn(IsSuper);
} else if (CGM.ReturnTypeUsesFPRet(ResultType)) {
@@ -1550,7 +1554,8 @@ CGObjCCommonMac::EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
: ObjCTypes.getSendFn(IsSuper);
}
Fn = llvm::ConstantExpr::getBitCast(Fn, llvm::PointerType::getUnqual(FTy));
- return CGF.EmitCall(FnInfo, Fn, Return, ActualArgs);
+ RValue rvalue = CGF.EmitCall(FnInfo, Fn, Return, ActualArgs);
+ return nullReturn.complete(CGF, rvalue, ResultType);
}
static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) {
@@ -1681,6 +1686,7 @@ void CGObjCCommonMac::GenerateProtocol(const ObjCProtocolDecl *PD) {
llvm::Constant *CGObjCCommonMac::GetProtocolRef(const ObjCProtocolDecl *PD) {
if (DefinedProtocols.count(PD->getIdentifier()))
return GetOrEmitProtocol(PD);
+
return GetOrEmitProtocolRef(PD);
}
@@ -1714,6 +1720,9 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
i = PD->instmeth_begin(), e = PD->instmeth_end(); i != e; ++i) {
ObjCMethodDecl *MD = *i;
llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (!C)
+ return GetOrEmitProtocolRef(PD);
+
if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
OptInstanceMethods.push_back(C);
} else {
@@ -1725,6 +1734,9 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
i = PD->classmeth_begin(), e = PD->classmeth_end(); i != e; ++i) {
ObjCMethodDecl *MD = *i;
llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (!C)
+ return GetOrEmitProtocolRef(PD);
+
if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
OptClassMethods.push_back(C);
} else {
@@ -1968,6 +1980,9 @@ CGObjCMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
ObjCTypes.SelectorPtrTy);
Desc[1] = GetMethodVarType(MD);
+ if (!Desc[1])
+ return 0;
+
return llvm::ConstantStruct::get(ObjCTypes.MethodDescriptionTy,
Desc);
}
@@ -2730,10 +2745,10 @@ void FragileHazards::collectLocals() {
}
llvm::FunctionType *FragileHazards::GetAsmFnType() {
- std::vector<const llvm::Type *> Tys(Locals.size());
- for (unsigned I = 0, E = Locals.size(); I != E; ++I)
- Tys[I] = Locals[I]->getType();
- return llvm::FunctionType::get(CGF.Builder.getVoidTy(), Tys, false);
+ llvm::SmallVector<const llvm::Type *, 16> tys(Locals.size());
+ for (unsigned i = 0, e = Locals.size(); i != e; ++i)
+ tys[i] = Locals[i]->getType();
+ return llvm::FunctionType::get(CGF.VoidTy, tys, false);
}
/*
@@ -3930,8 +3945,10 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) {
llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D) {
std::string TypeStr;
- CGM.getContext().getObjCEncodingForMethodDecl(const_cast<ObjCMethodDecl*>(D),
- TypeStr);
+ if (CGM.getContext().getObjCEncodingForMethodDecl(
+ const_cast<ObjCMethodDecl*>(D),
+ TypeStr))
+ return 0;
llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
@@ -4081,9 +4098,9 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
SourceLocation(), SourceLocation(),
&Ctx.Idents.get("_objc_super"));
RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
- Ctx.getObjCIdType(), 0, 0, false));
+ Ctx.getObjCIdType(), 0, 0, false, false));
RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
- Ctx.getObjCClassType(), 0, 0, false));
+ Ctx.getObjCClassType(), 0, 0, false, false));
RD->completeDefinition();
SuperCTy = Ctx.getTagDeclType(RD);
@@ -4480,11 +4497,9 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
ClassRonfABITy);
// ImpnfABITy - LLVM for id (*)(id, SEL, ...)
- std::vector<const llvm::Type*> Params;
- Params.push_back(ObjectPtrTy);
- Params.push_back(SelectorPtrTy);
- ImpnfABITy = llvm::PointerType::getUnqual(
- llvm::FunctionType::get(ObjectPtrTy, Params, false));
+ const llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ ImpnfABITy = llvm::FunctionType::get(ObjectPtrTy, params, false)
+ ->getPointerTo();
// struct _class_t {
// struct _class_t *isa;
@@ -4544,9 +4559,9 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
SourceLocation(), SourceLocation(),
&Ctx.Idents.get("_message_ref_t"));
RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
- Ctx.VoidPtrTy, 0, 0, false));
+ Ctx.VoidPtrTy, 0, 0, false, false));
RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
- Ctx.getObjCSelType(), 0, 0, false));
+ Ctx.getObjCSelType(), 0, 0, false, false));
RD->completeDefinition();
MessageRefCTy = Ctx.getTagDeclType(RD);
@@ -4658,56 +4673,68 @@ void CGObjCNonFragileABIMac::FinishNonFragileABIModule() {
EmitImageInfo();
}
-/// LegacyDispatchedSelector - Returns true if SEL is not in the list of
-/// NonLegacyDispatchMethods; false otherwise. What this means is that
+/// isVTableDispatchedSelector - Returns true if SEL is in the list of
+/// VTableDispatchMethods; false otherwise. What this means is that
/// except for the 19 selectors in the list, we generate 32bit-style
/// message dispatch call for all the rest.
-///
-bool CGObjCNonFragileABIMac::LegacyDispatchedSelector(Selector Sel) {
+bool CGObjCNonFragileABIMac::isVTableDispatchedSelector(Selector Sel) {
+ // At various points we've experimented with using vtable-based
+ // dispatch for all methods.
switch (CGM.getCodeGenOpts().getObjCDispatchMethod()) {
default:
- assert(0 && "Invalid dispatch method!");
+ llvm_unreachable("Invalid dispatch method!");
case CodeGenOptions::Legacy:
- return true;
- case CodeGenOptions::NonLegacy:
return false;
+ case CodeGenOptions::NonLegacy:
+ return true;
case CodeGenOptions::Mixed:
break;
}
// If so, see whether this selector is in the white-list of things which must
// use the new dispatch convention. We lazily build a dense set for this.
- if (NonLegacyDispatchMethods.empty()) {
- NonLegacyDispatchMethods.insert(GetNullarySelector("alloc"));
- NonLegacyDispatchMethods.insert(GetNullarySelector("class"));
- NonLegacyDispatchMethods.insert(GetNullarySelector("self"));
- NonLegacyDispatchMethods.insert(GetNullarySelector("isFlipped"));
- NonLegacyDispatchMethods.insert(GetNullarySelector("length"));
- NonLegacyDispatchMethods.insert(GetNullarySelector("count"));
- NonLegacyDispatchMethods.insert(GetNullarySelector("retain"));
- NonLegacyDispatchMethods.insert(GetNullarySelector("release"));
- NonLegacyDispatchMethods.insert(GetNullarySelector("autorelease"));
- NonLegacyDispatchMethods.insert(GetNullarySelector("hash"));
-
- NonLegacyDispatchMethods.insert(GetUnarySelector("allocWithZone"));
- NonLegacyDispatchMethods.insert(GetUnarySelector("isKindOfClass"));
- NonLegacyDispatchMethods.insert(GetUnarySelector("respondsToSelector"));
- NonLegacyDispatchMethods.insert(GetUnarySelector("objectForKey"));
- NonLegacyDispatchMethods.insert(GetUnarySelector("objectAtIndex"));
- NonLegacyDispatchMethods.insert(GetUnarySelector("isEqualToString"));
- NonLegacyDispatchMethods.insert(GetUnarySelector("isEqual"));
- NonLegacyDispatchMethods.insert(GetUnarySelector("addObject"));
- // "countByEnumeratingWithState:objects:count"
- IdentifierInfo *KeyIdents[] = {
- &CGM.getContext().Idents.get("countByEnumeratingWithState"),
- &CGM.getContext().Idents.get("objects"),
- &CGM.getContext().Idents.get("count")
- };
- NonLegacyDispatchMethods.insert(
- CGM.getContext().Selectors.getSelector(3, KeyIdents));
- }
-
- return (NonLegacyDispatchMethods.count(Sel) == 0);
+ if (VTableDispatchMethods.empty()) {
+ VTableDispatchMethods.insert(GetNullarySelector("alloc"));
+ VTableDispatchMethods.insert(GetNullarySelector("class"));
+ VTableDispatchMethods.insert(GetNullarySelector("self"));
+ VTableDispatchMethods.insert(GetNullarySelector("isFlipped"));
+ VTableDispatchMethods.insert(GetNullarySelector("length"));
+ VTableDispatchMethods.insert(GetNullarySelector("count"));
+
+ // These are vtable-based if GC is disabled.
+ // Optimistically use vtable dispatch for hybrid compiles.
+ if (CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
+ VTableDispatchMethods.insert(GetNullarySelector("retain"));
+ VTableDispatchMethods.insert(GetNullarySelector("release"));
+ VTableDispatchMethods.insert(GetNullarySelector("autorelease"));
+ }
+
+ VTableDispatchMethods.insert(GetUnarySelector("allocWithZone"));
+ VTableDispatchMethods.insert(GetUnarySelector("isKindOfClass"));
+ VTableDispatchMethods.insert(GetUnarySelector("respondsToSelector"));
+ VTableDispatchMethods.insert(GetUnarySelector("objectForKey"));
+ VTableDispatchMethods.insert(GetUnarySelector("objectAtIndex"));
+ VTableDispatchMethods.insert(GetUnarySelector("isEqualToString"));
+ VTableDispatchMethods.insert(GetUnarySelector("isEqual"));
+
+ // These are vtable-based if GC is enabled.
+ // Optimistically use vtable dispatch for hybrid compiles.
+ if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
+ VTableDispatchMethods.insert(GetNullarySelector("hash"));
+ VTableDispatchMethods.insert(GetUnarySelector("addObject"));
+
+ // "countByEnumeratingWithState:objects:count"
+ IdentifierInfo *KeyIdents[] = {
+ &CGM.getContext().Idents.get("countByEnumeratingWithState"),
+ &CGM.getContext().Idents.get("objects"),
+ &CGM.getContext().Idents.get("count")
+ };
+ VTableDispatchMethods.insert(
+ CGM.getContext().Selectors.getSelector(3, KeyIdents));
+ }
+ }
+
+ return VTableDispatchMethods.count(Sel);
}
// Metadata flags
@@ -5209,7 +5236,7 @@ CGObjCNonFragileABIMac::EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
IvarOffsetGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
else
IvarOffsetGV->setVisibility(llvm::GlobalValue::DefaultVisibility);
- IvarOffsetGV->setSection("__DATA, __objc_const");
+ IvarOffsetGV->setSection("__DATA, __objc_ivar");
return IvarOffsetGV;
}
@@ -5344,6 +5371,9 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
i = PD->instmeth_begin(), e = PD->instmeth_end(); i != e; ++i) {
ObjCMethodDecl *MD = *i;
llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (!C)
+ return GetOrEmitProtocolRef(PD);
+
if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
OptInstanceMethods.push_back(C);
} else {
@@ -5355,6 +5385,9 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
i = PD->classmeth_begin(), e = PD->classmeth_end(); i != e; ++i) {
ObjCMethodDecl *MD = *i;
llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (!C)
+ return GetOrEmitProtocolRef(PD);
+
if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
OptClassMethods.push_back(C);
} else {
@@ -5494,6 +5527,9 @@ CGObjCNonFragileABIMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
ObjCTypes.SelectorPtrTy);
Desc[1] = GetMethodVarType(MD);
+ if (!Desc[1])
+ return 0;
+
// Protocol methods have no implementation. So, this entry is always NULL.
Desc[2] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Desc);
@@ -5523,90 +5559,131 @@ llvm::Value *CGObjCNonFragileABIMac::EmitIvarOffset(
return CGF.Builder.CreateLoad(ObjCIvarOffsetVariable(Interface, Ivar),"ivar");
}
-CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend(
- CodeGen::CodeGenFunction &CGF,
- ReturnValueSlot Return,
- QualType ResultType,
- Selector Sel,
- llvm::Value *Receiver,
- QualType Arg0Ty,
- bool IsSuper,
- const CallArgList &CallArgs,
- const ObjCMethodDecl *Method) {
- // FIXME. Even though IsSuper is passes. This function doese not handle calls
- // to 'super' receivers.
- CodeGenTypes &Types = CGM.getTypes();
- llvm::Value *Arg0 = Receiver;
- if (!IsSuper)
- Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy, "tmp");
+static void appendSelectorForMessageRefTable(std::string &buffer,
+ Selector selector) {
+ if (selector.isUnarySelector()) {
+ buffer += selector.getNameForSlot(0);
+ return;
+ }
- // Find the message function name.
- // FIXME. This is too much work to get the ABI-specific result type needed to
- // find the message name.
- const CGFunctionInfo &FnInfo
- = Types.getFunctionInfo(ResultType, CallArgList(),
- FunctionType::ExtInfo());
- llvm::Constant *Fn = 0;
- std::string Name("\01l_");
- if (CGM.ReturnTypeUsesSRet(FnInfo)) {
- EmitNullReturnInitialization(CGF, Return, ResultType);
- if (IsSuper) {
- Fn = ObjCTypes.getMessageSendSuper2StretFixupFn();
- Name += "objc_msgSendSuper2_stret_fixup";
+ for (unsigned i = 0, e = selector.getNumArgs(); i != e; ++i) {
+ buffer += selector.getNameForSlot(i);
+ buffer += '_';
+ }
+}
+
+/// Emit a "v-table" message send. We emit a weak hidden-visibility
+/// struct, initially containing the selector pointer and a pointer to
+/// a "fixup" variant of the appropriate objc_msgSend. To call, we
+/// load and call the function pointer, passing the address of the
+/// struct as the second parameter. The runtime determines whether
+/// the selector is currently emitted using vtable dispatch; if so, it
+/// substitutes a stub function which simply tail-calls through the
+/// appropriate vtable slot, and if not, it substitutes a stub function
+/// which tail-calls objc_msgSend. Both stubs adjust the selector
+/// argument to correctly point to the selector.
+RValue
+CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
+ ReturnValueSlot returnSlot,
+ QualType resultType,
+ Selector selector,
+ llvm::Value *arg0,
+ QualType arg0Type,
+ bool isSuper,
+ const CallArgList &formalArgs,
+ const ObjCMethodDecl *method) {
+ // Compute the actual arguments.
+ CallArgList args;
+
+ // First argument: the receiver / super-call structure.
+ if (!isSuper)
+ arg0 = CGF.Builder.CreateBitCast(arg0, ObjCTypes.ObjectPtrTy);
+ args.add(RValue::get(arg0), arg0Type);
+
+ // Second argument: a pointer to the message ref structure. Leave
+ // the actual argument value blank for now.
+ args.add(RValue::get(0), ObjCTypes.MessageRefCPtrTy);
+
+ args.insert(args.end(), formalArgs.begin(), formalArgs.end());
+
+ const CGFunctionInfo &fnInfo =
+ CGM.getTypes().getFunctionInfo(resultType, args,
+ FunctionType::ExtInfo());
+
+ NullReturnState nullReturn;
+
+ // Find the function to call and the mangled name for the message
+ // ref structure. Using a different mangled name wouldn't actually
+ // be a problem; it would just be a waste.
+ //
+ // The runtime currently never uses vtable dispatch for anything
+ // except normal, non-super message-sends.
+ // FIXME: don't use this for that.
+ llvm::Constant *fn = 0;
+ std::string messageRefName("\01l_");
+ if (CGM.ReturnTypeUsesSRet(fnInfo)) {
+ if (isSuper) {
+ fn = ObjCTypes.getMessageSendSuper2StretFixupFn();
+ messageRefName += "objc_msgSendSuper2_stret_fixup";
} else {
- Fn = ObjCTypes.getMessageSendStretFixupFn();
- Name += "objc_msgSend_stret_fixup";
+ nullReturn.init(CGF, arg0);
+ fn = ObjCTypes.getMessageSendStretFixupFn();
+ messageRefName += "objc_msgSend_stret_fixup";
}
- } else if (!IsSuper && CGM.ReturnTypeUsesFPRet(ResultType)) {
- Fn = ObjCTypes.getMessageSendFpretFixupFn();
- Name += "objc_msgSend_fpret_fixup";
+ } else if (!isSuper && CGM.ReturnTypeUsesFPRet(resultType)) {
+ fn = ObjCTypes.getMessageSendFpretFixupFn();
+ messageRefName += "objc_msgSend_fpret_fixup";
} else {
- if (IsSuper) {
- Fn = ObjCTypes.getMessageSendSuper2FixupFn();
- Name += "objc_msgSendSuper2_fixup";
+ if (isSuper) {
+ fn = ObjCTypes.getMessageSendSuper2FixupFn();
+ messageRefName += "objc_msgSendSuper2_fixup";
} else {
- Fn = ObjCTypes.getMessageSendFixupFn();
- Name += "objc_msgSend_fixup";
+ fn = ObjCTypes.getMessageSendFixupFn();
+ messageRefName += "objc_msgSend_fixup";
}
}
- assert(Fn && "CGObjCNonFragileABIMac::EmitMessageSend");
- Name += '_';
- std::string SelName(Sel.getAsString());
- // Replace all ':' in selector name with '_' ouch!
- for (unsigned i = 0; i < SelName.size(); i++)
- if (SelName[i] == ':')
- SelName[i] = '_';
- Name += SelName;
- llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
- if (!GV) {
- // Build message ref table entry.
- std::vector<llvm::Constant*> Values(2);
- Values[0] = Fn;
- Values[1] = GetMethodVarName(Sel);
- llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
- GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
- llvm::GlobalValue::WeakAnyLinkage,
- Init,
- Name);
- GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
- GV->setAlignment(16);
- GV->setSection("__DATA, __objc_msgrefs, coalesced");
- }
- llvm::Value *Arg1 = CGF.Builder.CreateBitCast(GV, ObjCTypes.MessageRefPtrTy);
-
- CallArgList ActualArgs;
- ActualArgs.add(RValue::get(Arg0), Arg0Ty);
- ActualArgs.add(RValue::get(Arg1), ObjCTypes.MessageRefCPtrTy);
- ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
- const CGFunctionInfo &FnInfo1 = Types.getFunctionInfo(ResultType, ActualArgs,
- FunctionType::ExtInfo());
- llvm::Value *Callee = CGF.Builder.CreateStructGEP(Arg1, 0);
- Callee = CGF.Builder.CreateLoad(Callee);
- const llvm::FunctionType *FTy =
- Types.GetFunctionType(FnInfo1, Method ? Method->isVariadic() : false);
- Callee = CGF.Builder.CreateBitCast(Callee,
- llvm::PointerType::getUnqual(FTy));
- return CGF.EmitCall(FnInfo1, Callee, Return, ActualArgs);
+ assert(fn && "CGObjCNonFragileABIMac::EmitMessageSend");
+ messageRefName += '_';
+
+ // Append the selector name, except use underscores anywhere we
+ // would have used colons.
+ appendSelectorForMessageRefTable(messageRefName, selector);
+
+ llvm::GlobalVariable *messageRef
+ = CGM.getModule().getGlobalVariable(messageRefName);
+ if (!messageRef) {
+ // Build the message ref structure.
+ llvm::Constant *values[] = { fn, GetMethodVarName(selector) };
+ llvm::Constant *init =
+ llvm::ConstantStruct::get(VMContext, values, 2, false);
+ messageRef = new llvm::GlobalVariable(CGM.getModule(),
+ init->getType(),
+ /*constant*/ false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ init,
+ messageRefName);
+ messageRef->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ messageRef->setAlignment(16);
+ messageRef->setSection("__DATA, __objc_msgrefs, coalesced");
+ }
+ llvm::Value *mref =
+ CGF.Builder.CreateBitCast(messageRef, ObjCTypes.MessageRefPtrTy);
+
+ // Update the message ref argument.
+ args[1].RV = RValue::get(mref);
+
+ // Load the function to call from the message ref table.
+ llvm::Value *callee = CGF.Builder.CreateStructGEP(mref, 0);
+ callee = CGF.Builder.CreateLoad(callee, "msgSend_fn");
+
+ bool variadic = method ? method->isVariadic() : false;
+ const llvm::FunctionType *fnType =
+ CGF.getTypes().GetFunctionType(fnInfo, variadic);
+ callee = CGF.Builder.CreateBitCast(callee,
+ llvm::PointerType::getUnqual(fnType));
+
+ RValue result = CGF.EmitCall(fnInfo, callee, returnSlot, args);
+ return nullReturn.complete(CGF, result, resultType);
}
/// Generate code for a message send expression in the nonfragile abi.
@@ -5619,14 +5696,14 @@ CGObjCNonFragileABIMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
const CallArgList &CallArgs,
const ObjCInterfaceDecl *Class,
const ObjCMethodDecl *Method) {
- return LegacyDispatchedSelector(Sel)
- ? EmitLegacyMessageSend(CGF, Return, ResultType,
- EmitSelector(CGF.Builder, Sel),
+ return isVTableDispatchedSelector(Sel)
+ ? EmitVTableMessageSend(CGF, Return, ResultType, Sel,
Receiver, CGF.getContext().getObjCIdType(),
- false, CallArgs, Method, ObjCTypes)
- : EmitMessageSend(CGF, Return, ResultType, Sel,
+ false, CallArgs, Method)
+ : EmitMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
Receiver, CGF.getContext().getObjCIdType(),
- false, CallArgs, Method);
+ false, CallArgs, Method, ObjCTypes);
}
llvm::GlobalVariable *
@@ -5773,14 +5850,14 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
CGF.Builder.CreateStore(Target,
CGF.Builder.CreateStructGEP(ObjCSuper, 1));
- return (LegacyDispatchedSelector(Sel))
- ? EmitLegacyMessageSend(CGF, Return, ResultType,
- EmitSelector(CGF.Builder, Sel),
+ return (isVTableDispatchedSelector(Sel))
+ ? EmitVTableMessageSend(CGF, Return, ResultType, Sel,
ObjCSuper, ObjCTypes.SuperPtrCTy,
- true, CallArgs, Method, ObjCTypes)
- : EmitMessageSend(CGF, Return, ResultType, Sel,
+ true, CallArgs, Method)
+ : EmitMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
ObjCSuper, ObjCTypes.SuperPtrCTy,
- true, CallArgs, Method);
+ true, CallArgs, Method, ObjCTypes);
}
llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CGBuilderTy &Builder,
diff --git a/lib/CodeGen/CGObjCRuntime.cpp b/lib/CodeGen/CGObjCRuntime.cpp
index 3d854d4..21150f1 100644
--- a/lib/CodeGen/CGObjCRuntime.cpp
+++ b/lib/CodeGen/CGObjCRuntime.cpp
@@ -165,9 +165,9 @@ namespace {
void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
const ObjCAtTryStmt &S,
- llvm::Function *beginCatchFn,
- llvm::Function *endCatchFn,
- llvm::Function *exceptionRethrowFn) {
+ llvm::Constant *beginCatchFn,
+ llvm::Constant *endCatchFn,
+ llvm::Constant *exceptionRethrowFn) {
// Jump destination for falling out of catch bodies.
CodeGenFunction::JumpDest Cont;
if (S.getNumCatchStmts())
@@ -233,6 +233,8 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
cast<llvm::CallInst>(Exn)->setDoesNotThrow();
}
+ CodeGenFunction::RunCleanupsScope cleanups(CGF);
+
if (endCatchFn) {
// Add a cleanup to leave the catch.
bool EndCatchMightThrow = (Handler.Variable == 0);
@@ -255,9 +257,8 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
CGF.EmitStmt(Handler.Body);
CGF.ObjCEHValueStack.pop_back();
- // Leave the earlier cleanup.
- if (endCatchFn)
- CGF.PopCleanupBlock();
+ // Leave any cleanups associated with the catch.
+ cleanups.ForceCleanup();
CGF.EmitBranchThroughCleanup(Cont);
}
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index 0cc2d82..866d5d8 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -95,9 +95,9 @@ protected:
/// thrown object directly.
void EmitTryCatchStmt(CodeGenFunction &CGF,
const ObjCAtTryStmt &S,
- llvm::Function *beginCatchFn,
- llvm::Function *endCatchFn,
- llvm::Function *exceptionRethrowFn);
+ llvm::Constant *beginCatchFn,
+ llvm::Constant *endCatchFn,
+ llvm::Constant *exceptionRethrowFn);
/// Emits an @synchronize() statement, using the syncEnterFn and syncExitFn
/// arguments as the functions called to lock and unlock the object. This
/// function can be called by subclasses that use zero-cost exception
@@ -243,6 +243,7 @@ public:
llvm::Value *Size) = 0;
virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
const CodeGen::CGBlockInfo &blockInfo) = 0;
+ virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name) = 0;
};
/// Creates an instance of an Objective-C runtime class.
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index a4ac390..0d72f85 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -234,7 +234,7 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);
- bool IsSigned = FD->getType()->isSignedIntegerType();
+ bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
if (FieldSize > TypeSizeInBits) {
// We have a wide bit-field. The extra bits are only used for padding, so
@@ -753,8 +753,7 @@ bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
// Zero-length bitfields following non-bitfield members are
// ignored:
const FieldDecl *FD = (*Field);
- if (Types.getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD) ||
- Types.getContext().ZeroBitfieldFollowsBitfield(FD, LastFD)) {
+ if (Types.getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
--FieldNo;
continue;
}
@@ -997,8 +996,7 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
if (IsMsStruct) {
// Zero-length bitfields following non-bitfield members are
// ignored:
- if (getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD) ||
- getContext().ZeroBitfieldFollowsBitfield(FD, LastFD)) {
+ if (getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
--i;
continue;
}
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index 99bc3f4..a982621 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -773,10 +773,8 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
// As long as debug info is modeled with instructions, we have to ensure we
// have a place to insert here and write the stop point here.
- if (getDebugInfo()) {
- EnsureInsertPoint();
+ if (getDebugInfo() && HaveInsertPoint())
EmitStopPoint(&S);
- }
for (DeclStmt::const_decl_iterator I = S.decl_begin(), E = S.decl_end();
I != E; ++I)
@@ -999,7 +997,7 @@ static CSFC_Result CollectStatementsForCase(const Stmt *S,
// If we're looking for the case, just see if we can skip each of the
// substatements.
for (; Case && I != E; ++I) {
- HadSkippedDecl |= isa<DeclStmt>(I);
+ HadSkippedDecl |= isa<DeclStmt>(*I);
switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
case CSFC_Failure: return CSFC_Failure;
@@ -1224,7 +1222,7 @@ SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
while (*Constraint) {
switch (*Constraint) {
default:
- Result += Target.convertConstraint(*Constraint);
+ Result += Target.convertConstraint(Constraint);
break;
// Ignore these
case '*':
@@ -1422,8 +1420,8 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
const Expr *OutExpr = S.getOutputExpr(i);
OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
- OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr, Target,
- CGM, S);
+ OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
+ Target, CGM, S);
LValue Dest = EmitLValue(OutExpr);
if (!Constraints.empty())
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
index a6849f8..aefc41e 100644
--- a/lib/CodeGen/CGVTT.cpp
+++ b/lib/CodeGen/CGVTT.cpp
@@ -411,6 +411,8 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
Out.flush();
llvm::StringRef Name = OutName.str();
+ ComputeVTableRelatedInformation(RD, /*VTableRequired=*/true);
+
VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/false);
const llvm::Type *Int8PtrTy =
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index 581467c..9ac5e67 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -21,6 +21,7 @@
#include "llvm/ADT/SetVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Format.h"
+#include "llvm/Transforms/Utils/Cloning.h"
#include <algorithm>
#include <cstdio>
@@ -2636,6 +2637,131 @@ static bool similar(const ABIArgInfo &infoL, CanQualType typeL,
}
#endif
+static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
+ QualType ResultType, RValue RV,
+ const ThunkInfo &Thunk) {
+ // Emit the return adjustment.
+ bool NullCheckValue = !ResultType->isReferenceType();
+
+ llvm::BasicBlock *AdjustNull = 0;
+ llvm::BasicBlock *AdjustNotNull = 0;
+ llvm::BasicBlock *AdjustEnd = 0;
+
+ llvm::Value *ReturnValue = RV.getScalarVal();
+
+ if (NullCheckValue) {
+ AdjustNull = CGF.createBasicBlock("adjust.null");
+ AdjustNotNull = CGF.createBasicBlock("adjust.notnull");
+ AdjustEnd = CGF.createBasicBlock("adjust.end");
+
+ llvm::Value *IsNull = CGF.Builder.CreateIsNull(ReturnValue);
+ CGF.Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull);
+ CGF.EmitBlock(AdjustNotNull);
+ }
+
+ ReturnValue = PerformTypeAdjustment(CGF, ReturnValue,
+ Thunk.Return.NonVirtual,
+ Thunk.Return.VBaseOffsetOffset);
+
+ if (NullCheckValue) {
+ CGF.Builder.CreateBr(AdjustEnd);
+ CGF.EmitBlock(AdjustNull);
+ CGF.Builder.CreateBr(AdjustEnd);
+ CGF.EmitBlock(AdjustEnd);
+
+ llvm::PHINode *PHI = CGF.Builder.CreatePHI(ReturnValue->getType(), 2);
+ PHI->addIncoming(ReturnValue, AdjustNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()),
+ AdjustNull);
+ ReturnValue = PHI;
+ }
+
+ return RValue::get(ReturnValue);
+}
+
+// This function does roughly the same thing as GenerateThunk, but in a
+// very different way, so that va_start and va_end work correctly.
+// FIXME: This function assumes "this" is the first non-sret LLVM argument of
+// a function, and that there is an alloca built in the entry block
+// for all accesses to "this".
+// FIXME: This function assumes there is only one "ret" statement per function.
+// FIXME: Cloning isn't correct in the presence of indirect goto!
+// FIXME: This implementation of thunks bloats codesize by duplicating the
+// function definition. There are alternatives:
+// 1. Add some sort of stub support to LLVM for cases where we can
+// do a this adjustment, then a sibcall.
+// 2. We could transform the definition to take a va_list instead of an
+// actual variable argument list, then have the thunks (including a
+// no-op thunk for the regular definition) call va_start/va_end.
+// There's a bit of per-call overhead for this solution, but it's
+// better for codesize if the definition is long.
+void CodeGenFunction::GenerateVarArgsThunk(
+ llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo,
+ GlobalDecl GD, const ThunkInfo &Thunk) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ QualType ResultType = FPT->getResultType();
+
+ // Get the original function
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(FnInfo, /*IsVariadic*/true);
+ llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
+ llvm::Function *BaseFn = cast<llvm::Function>(Callee);
+
+ // Clone to thunk.
+ llvm::Function *NewFn = llvm::CloneFunction(BaseFn);
+ CGM.getModule().getFunctionList().push_back(NewFn);
+ Fn->replaceAllUsesWith(NewFn);
+ NewFn->takeName(Fn);
+ Fn->eraseFromParent();
+ Fn = NewFn;
+
+ // "Initialize" CGF (minimally).
+ CurFn = Fn;
+
+ // Get the "this" value
+ llvm::Function::arg_iterator AI = Fn->arg_begin();
+ if (CGM.ReturnTypeUsesSRet(FnInfo))
+ ++AI;
+
+ // Find the first store of "this", which will be to the alloca associated
+ // with "this".
+ llvm::Value *ThisPtr = &*AI;
+ llvm::BasicBlock *EntryBB = Fn->begin();
+ llvm::Instruction *ThisStore = 0;
+ for (llvm::BasicBlock::iterator I = EntryBB->begin(), E = EntryBB->end();
+ I != E; I++) {
+ if (isa<llvm::StoreInst>(I) && I->getOperand(0) == ThisPtr) {
+ ThisStore = cast<llvm::StoreInst>(I);
+ break;
+ }
+ }
+ assert(ThisStore && "Store of this should be in entry block?");
+ // Adjust "this", if necessary.
+ Builder.SetInsertPoint(ThisStore);
+ llvm::Value *AdjustedThisPtr =
+ PerformTypeAdjustment(*this, ThisPtr,
+ Thunk.This.NonVirtual,
+ Thunk.This.VCallOffsetOffset);
+ ThisStore->setOperand(0, AdjustedThisPtr);
+
+ if (!Thunk.Return.isEmpty()) {
+ // Fix up the returned value, if necessary.
+ for (llvm::Function::iterator I = Fn->begin(), E = Fn->end(); I != E; I++) {
+ llvm::Instruction *T = I->getTerminator();
+ if (isa<llvm::ReturnInst>(T)) {
+ RValue RV = RValue::get(T->getOperand(0));
+ T->eraseFromParent();
+ Builder.SetInsertPoint(&*I);
+ RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);
+ Builder.CreateRet(RV.getScalarVal());
+ break;
+ }
+ }
+ }
+}
+
void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
const CGFunctionInfo &FnInfo,
GlobalDecl GD, const ThunkInfo &Thunk) {
@@ -2715,45 +2841,8 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
// Now emit our call.
RValue RV = EmitCall(FnInfo, Callee, Slot, CallArgs, MD);
- if (!Thunk.Return.isEmpty()) {
- // Emit the return adjustment.
- bool NullCheckValue = !ResultType->isReferenceType();
-
- llvm::BasicBlock *AdjustNull = 0;
- llvm::BasicBlock *AdjustNotNull = 0;
- llvm::BasicBlock *AdjustEnd = 0;
-
- llvm::Value *ReturnValue = RV.getScalarVal();
-
- if (NullCheckValue) {
- AdjustNull = createBasicBlock("adjust.null");
- AdjustNotNull = createBasicBlock("adjust.notnull");
- AdjustEnd = createBasicBlock("adjust.end");
-
- llvm::Value *IsNull = Builder.CreateIsNull(ReturnValue);
- Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull);
- EmitBlock(AdjustNotNull);
- }
-
- ReturnValue = PerformTypeAdjustment(*this, ReturnValue,
- Thunk.Return.NonVirtual,
- Thunk.Return.VBaseOffsetOffset);
-
- if (NullCheckValue) {
- Builder.CreateBr(AdjustEnd);
- EmitBlock(AdjustNull);
- Builder.CreateBr(AdjustEnd);
- EmitBlock(AdjustEnd);
-
- llvm::PHINode *PHI = Builder.CreatePHI(ReturnValue->getType(), 2);
- PHI->addIncoming(ReturnValue, AdjustNotNull);
- PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()),
- AdjustNull);
- ReturnValue = PHI;
- }
-
- RV = RValue::get(ReturnValue);
- }
+ if (!Thunk.Return.isEmpty())
+ RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);
if (!ResultType->isVoidType() && Slot.isNull())
CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);
@@ -2823,8 +2912,18 @@ void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
return;
}
- // Actually generate the thunk body.
- CodeGenFunction(CGM).GenerateThunk(ThunkFn, FnInfo, GD, Thunk);
+ if (ThunkFn->isVarArg()) {
+ // Varargs thunks are special; we can't just generate a call because
+ // we can't copy the varargs. Our implementation is rather
+ // expensive/sucky at the moment, so don't generate the thunk unless
+ // we have to.
+ // FIXME: Do something better here; GenerateVarArgsThunk is extremely ugly.
+ if (!UseAvailableExternallyLinkage)
+ CodeGenFunction(CGM).GenerateVarArgsThunk(ThunkFn, FnInfo, GD, Thunk);
+ } else {
+ // Normal thunk body generation.
+ CodeGenFunction(CGM).GenerateThunk(ThunkFn, FnInfo, GD, Thunk);
+ }
if (UseAvailableExternallyLinkage)
ThunkFn->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
@@ -3076,7 +3175,7 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
Out.flush();
llvm::StringRef Name = OutName.str();
- ComputeVTableRelatedInformation(RD, true);
+ ComputeVTableRelatedInformation(RD, /*VTableRequired=*/true);
const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
llvm::ArrayType *ArrayType =
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index 626c2b0..150cb69 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -33,9 +33,9 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
Target(CGM.getContext().Target), Builder(cgm.getModule().getContext()),
BlockInfo(0), BlockPointer(0),
NormalCleanupDest(0), EHCleanupDest(0), NextCleanupDestIndex(1),
- ExceptionSlot(0), DebugInfo(0), DisableDebugInfo(false), IndirectBranch(0),
- SwitchInsn(0), CaseRangeBlock(0),
- DidCallStackSave(false), UnreachableBlock(0),
+ ExceptionSlot(0), EHSelectorSlot(0),
+ DebugInfo(0), DisableDebugInfo(false), DidCallStackSave(false),
+ IndirectBranch(0), SwitchInsn(0), CaseRangeBlock(0), UnreachableBlock(0),
CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
OutermostConditional(0), TerminateLandingPad(0), TerminateHandler(0),
TrapBB(0) {
@@ -44,10 +44,6 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
CGM.getCXXABI().getMangleContext().startNewFunction();
}
-ASTContext &CodeGenFunction::getContext() const {
- return CGM.getContext();
-}
-
const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
return CGM.getTypes().ConvertTypeForMem(T);
@@ -57,9 +53,41 @@ const llvm::Type *CodeGenFunction::ConvertType(QualType T) {
return CGM.getTypes().ConvertType(T);
}
-bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
- return T->isRecordType() || T->isArrayType() || T->isAnyComplexType() ||
- T->isObjCObjectType();
+bool CodeGenFunction::hasAggregateLLVMType(QualType type) {
+ switch (type.getCanonicalType()->getTypeClass()) {
+#define TYPE(name, parent)
+#define ABSTRACT_TYPE(name, parent)
+#define NON_CANONICAL_TYPE(name, parent) case Type::name:
+#define DEPENDENT_TYPE(name, parent) case Type::name:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("non-canonical or dependent type in IR-generation");
+
+ case Type::Builtin:
+ case Type::Pointer:
+ case Type::BlockPointer:
+ case Type::LValueReference:
+ case Type::RValueReference:
+ case Type::MemberPointer:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ case Type::Enum:
+ case Type::ObjCObjectPointer:
+ return false;
+
+ // Complexes, arrays, records, and Objective-C objects.
+ case Type::Complex:
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::Record:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ return true;
+ }
+ llvm_unreachable("unknown type kind!");
}
void CodeGenFunction::EmitReturnBlock() {
@@ -168,7 +196,7 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
bool CodeGenFunction::ShouldInstrumentFunction() {
if (!CGM.getCodeGenOpts().InstrumentFunctions)
return false;
- if (CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
+ if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
return false;
return true;
}
@@ -177,16 +205,12 @@ bool CodeGenFunction::ShouldInstrumentFunction() {
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
- const llvm::PointerType *PointerTy;
- const llvm::FunctionType *FunctionTy;
- std::vector<const llvm::Type*> ProfileFuncArgs;
-
// void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
- PointerTy = Int8PtrTy;
- ProfileFuncArgs.push_back(PointerTy);
- ProfileFuncArgs.push_back(PointerTy);
- FunctionTy = llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
- ProfileFuncArgs, false);
+ const llvm::PointerType *PointerTy = Int8PtrTy;
+ const llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
+ const llvm::FunctionType *FunctionTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
+ ProfileFuncArgs, false);
llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
llvm::CallInst *CallSite = Builder.CreateCall(
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index 169c576..bb8fd8e 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -603,6 +603,10 @@ public:
/// exception pointer into this alloca.
llvm::Value *ExceptionSlot;
+ /// The selector slot. Under the MandatoryCleanup model, all
+ /// landing pads write the current selector value into this alloca.
+ llvm::AllocaInst *EHSelectorSlot;
+
/// Emits a landing pad for the current EH stack.
llvm::BasicBlock *EmitLandingPad();
@@ -951,6 +955,10 @@ private:
CGDebugInfo *DebugInfo;
bool DisableDebugInfo;
+ /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
+ /// calling llvm.stacksave for multiple VLAs in the same scope.
+ bool DidCallStackSave;
+
/// IndirectBranch - The first time an indirect goto is seen we create a block
/// with an indirect branch. Every time we see the address of a label taken,
/// we add the label to the indirect goto. Every subsequent indirect goto is
@@ -997,10 +1005,6 @@ private:
// enter/leave scopes.
llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
- /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
- /// calling llvm.stacksave for multiple VLAs in the same scope.
- bool DidCallStackSave;
-
/// A block containing a single 'unreachable' instruction. Created
/// lazily by getUnreachableBlock().
llvm::BasicBlock *UnreachableBlock;
@@ -1035,7 +1039,7 @@ public:
CodeGenFunction(CodeGenModule &cgm);
CodeGenTypes &getTypes() const { return CGM.getTypes(); }
- ASTContext &getContext() const;
+ ASTContext &getContext() const { return CGM.getContext(); }
CGDebugInfo *getDebugInfo() {
if (DisableDebugInfo)
return NULL;
@@ -1050,6 +1054,7 @@ public:
/// Returns a pointer to the function's exception object slot, which
/// is assigned in every landing pad.
llvm::Value *getExceptionSlot();
+ llvm::Value *getEHSelectorSlot();
llvm::Value *getNormalCleanupDestSlot();
llvm::Value *getEHCleanupDestSlot();
@@ -1076,7 +1081,8 @@ public:
void GenerateObjCMethod(const ObjCMethodDecl *OMD);
void StartObjCMethod(const ObjCMethodDecl *MD,
- const ObjCContainerDecl *CD);
+ const ObjCContainerDecl *CD,
+ SourceLocation StartLoc);
/// GenerateObjCGetter - Synthesize an Objective-C property getter function.
void GenerateObjCGetter(ObjCImplementationDecl *IMP,
@@ -1157,6 +1163,9 @@ public:
void GenerateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
GlobalDecl GD, const ThunkInfo &Thunk);
+ void GenerateVarArgsThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
+ GlobalDecl GD, const ThunkInfo &Thunk);
+
void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
FunctionArgList &Args);
@@ -1701,6 +1710,7 @@ public:
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
+ llvm::Constant *getUnwindResumeFn();
llvm::Constant *getUnwindResumeOrRethrowFn();
void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
@@ -1915,6 +1925,9 @@ public:
RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
ReturnValueSlot ReturnValue);
+ llvm::Value *EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD,
+ llvm::Value *This);
RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
const CXXMethodDecl *MD,
ReturnValueSlot ReturnValue);
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index 83e927f..7a1a968 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -90,9 +90,10 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
// Initialize the type cache.
llvm::LLVMContext &LLVMContext = M.getContext();
- Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
- Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
- Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
+ VoidTy = llvm::Type::getVoidTy(LLVMContext);
+ Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
+ Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
+ Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
PointerWidthInBits = C.Target.getPointerWidth(0);
PointerAlignInBytes =
C.toCharUnitsFromBits(C.Target.getPointerAlign(0)).getQuantity();
@@ -132,6 +133,9 @@ void CodeGenModule::Release() {
if (getCodeGenOpts().EmitDeclMetadata)
EmitDeclMetadata();
+
+ if (getCodeGenOpts().EmitGcovArcs || getCodeGenOpts().EmitGcovNotes)
+ EmitCoverageFile();
}
void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
@@ -339,8 +343,7 @@ void CodeGenModule::AddGlobalDtor(llvm::Function * Dtor, int Priority) {
void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
// Ctor function type is void()*.
- llvm::FunctionType* CtorFTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), false);
+ llvm::FunctionType* CtorFTy = llvm::FunctionType::get(VoidTy, false);
llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy);
// Get the type of a ctor entry, { i32, void ()* }.
@@ -449,6 +452,9 @@ void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
llvm::Function *F) {
+ if (CodeGenOpts.UnwindTables)
+ F->setHasUWTable();
+
if (!Features.Exceptions && !Features.ObjCNonFragileABI)
F->addFnAttr(llvm::Attribute::NoUnwind);
@@ -724,7 +730,7 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
}
// Forward declarations are emitted lazily on first use.
- if (!FD->isThisDeclarationADefinition())
+ if (!FD->doesThisDeclarationHaveABody())
return;
} else {
const VarDecl *VD = cast<VarDecl>(Global);
@@ -790,14 +796,19 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
return;
if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ // Make sure to emit the definition(s) before we emit the thunks.
+ // This is necessary for the generation of certain thunks.
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(Method))
+ EmitCXXConstructor(CD, GD.getCtorType());
+ else if (const CXXDestructorDecl *DD =dyn_cast<CXXDestructorDecl>(Method))
+ EmitCXXDestructor(DD, GD.getDtorType());
+ else
+ EmitGlobalFunctionDefinition(GD);
+
if (Method->isVirtual())
getVTables().EmitThunks(GD);
- if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(Method))
- return EmitCXXConstructor(CD, GD.getCtorType());
-
- if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(Method))
- return EmitCXXDestructor(DD, GD.getDtorType());
+ return;
}
return EmitGlobalFunctionDefinition(GD);
@@ -848,7 +859,7 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
if (isa<llvm::FunctionType>(Ty)) {
FTy = cast<llvm::FunctionType>(Ty);
} else {
- FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), false);
+ FTy = llvm::FunctionType::get(VoidTy, false);
IsIncompleteFunction = true;
}
@@ -889,7 +900,7 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
assert(FD->isUsed() && "Sema didn't mark implicit function as used!");
DeferredDeclsToEmit.push_back(D.getWithDecl(FD));
break;
- } else if (FD->isThisDeclarationADefinition()) {
+ } else if (FD->doesThisDeclarationHaveABody()) {
DeferredDeclsToEmit.push_back(D.getWithDecl(FD));
break;
}
@@ -930,14 +941,19 @@ CodeGenModule::CreateRuntimeFunction(const llvm::FunctionType *FTy,
return GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false);
}
-static bool DeclIsConstantGlobal(ASTContext &Context, const VarDecl *D) {
+static bool DeclIsConstantGlobal(ASTContext &Context, const VarDecl *D,
+ bool ConstantInit) {
if (!D->getType().isConstant(Context) && !D->getType()->isReferenceType())
return false;
- if (Context.getLangOptions().CPlusPlus &&
- Context.getBaseElementType(D->getType())->getAs<RecordType>()) {
- // FIXME: We should do something fancier here!
- return false;
+
+ if (Context.getLangOptions().CPlusPlus) {
+ if (const RecordType *Record
+ = Context.getBaseElementType(D->getType())->getAs<RecordType>())
+ return ConstantInit &&
+ cast<CXXRecordDecl>(Record->getDecl())->isPOD() &&
+ !cast<CXXRecordDecl>(Record->getDecl())->hasMutableFields();
}
+
return true;
}
@@ -994,7 +1010,7 @@ CodeGenModule::GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
if (D) {
// FIXME: This code is overly simple and should be merged with other global
// handling.
- GV->setConstant(DeclIsConstantGlobal(Context, D));
+ GV->setConstant(DeclIsConstantGlobal(Context, D, false));
// Set linkage and visibility in case we never see a definition.
NamedDecl::LinkageInfo LV = D->getLinkageAndVisibility();
@@ -1109,7 +1125,7 @@ void CodeGenModule::EmitVTable(CXXRecordDecl *Class, bool DefinitionRequired) {
llvm::GlobalVariable::LinkageTypes
CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
- if (RD->isInAnonymousNamespace() || !RD->hasLinkage())
+ if (RD->getLinkage() != ExternalLinkage)
return llvm::GlobalVariable::InternalLinkage;
if (const CXXMethodDecl *KeyFunction
@@ -1276,7 +1292,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
// If it is safe to mark the global 'constant', do so now.
GV->setConstant(false);
- if (!NonConstInit && DeclIsConstantGlobal(Context, D))
+ if (!NonConstInit && DeclIsConstantGlobal(Context, D, true))
GV->setConstant(true);
GV->setAlignment(getContext().getDeclAlign(D).getQuantity());
@@ -1561,14 +1577,24 @@ llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
"isn't a lib fn");
// Get the name, skip over the __builtin_ prefix (if necessary).
- const char *Name = Context.BuiltinInfo.GetName(BuiltinID);
- if (Context.BuiltinInfo.isLibFunction(BuiltinID))
- Name += 10;
+ llvm::StringRef Name;
+ GlobalDecl D(FD);
+
+ // If the builtin has been declared explicitly with an assembler label,
+ // use the mangled name. This differs from the plain label on platforms
+ // that prefix labels.
+ if (FD->hasAttr<AsmLabelAttr>())
+ Name = getMangledName(D);
+ else if (Context.BuiltinInfo.isLibFunction(BuiltinID))
+ Name = Context.BuiltinInfo.GetName(BuiltinID) + 10;
+ else
+ Name = Context.BuiltinInfo.GetName(BuiltinID);
+
const llvm::FunctionType *Ty =
cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
- return GetOrCreateLLVMFunction(Name, Ty, GlobalDecl(FD), /*ForVTable=*/false);
+ return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}
llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,const llvm::Type **Tys,
@@ -1628,6 +1654,16 @@ GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
return Map.GetOrCreateValue(llvm::StringRef(AsBytes.data(), AsBytes.size()));
}
+static llvm::StringMapEntry<llvm::Constant*> &
+GetConstantStringEntry(llvm::StringMap<llvm::Constant*> &Map,
+ const StringLiteral *Literal,
+ unsigned &StringLength)
+{
+ llvm::StringRef String = Literal->getString();
+ StringLength = String.size();
+ return Map.GetOrCreateValue(String);
+}
+
llvm::Constant *
CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
unsigned StringLength = 0;
@@ -1721,11 +1757,8 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
llvm::Constant *
CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
unsigned StringLength = 0;
- bool isUTF16 = false;
llvm::StringMapEntry<llvm::Constant*> &Entry =
- GetConstantCFStringEntry(CFConstantStringMap, Literal,
- getTargetData().isLittleEndian(),
- isUTF16, StringLength);
+ GetConstantStringEntry(CFConstantStringMap, Literal, StringLength);
if (llvm::Constant *C = Entry.getValue())
return C;
@@ -1738,24 +1771,26 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
if (!ConstantStringClassRef) {
std::string StringClass(getLangOptions().ObjCConstantStringClass);
const llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
- Ty = llvm::ArrayType::get(Ty, 0);
llvm::Constant *GV;
- if (StringClass.empty())
- GV = CreateRuntimeVariable(Ty,
- Features.ObjCNonFragileABI ?
- "OBJC_CLASS_$_NSConstantString" :
- "_NSConstantStringClassReference");
- else {
- std::string str;
- if (Features.ObjCNonFragileABI)
- str = "OBJC_CLASS_$_" + StringClass;
- else
- str = "_" + StringClass + "ClassReference";
- GV = CreateRuntimeVariable(Ty, str);
+ if (Features.ObjCNonFragileABI) {
+ std::string str =
+ StringClass.empty() ? "OBJC_CLASS_$_NSConstantString"
+ : "OBJC_CLASS_$_" + StringClass;
+ GV = getObjCRuntime().GetClassGlobal(str);
+ // Make sure the result is of the correct type.
+ const llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
+ ConstantStringClassRef =
+ llvm::ConstantExpr::getBitCast(GV, PTy);
+ } else {
+ std::string str =
+ StringClass.empty() ? "_NSConstantStringClassReference"
+ : "_" + StringClass + "ClassReference";
+ const llvm::Type *PTy = llvm::ArrayType::get(Ty, 0);
+ GV = CreateRuntimeVariable(PTy, str);
+ // Decay array -> ptr
+ ConstantStringClassRef =
+ llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
}
- // Decay array -> ptr
- ConstantStringClassRef =
- llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
}
QualType NSTy = getContext().getNSConstantStringType();
@@ -1773,28 +1808,15 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
llvm::GlobalValue::LinkageTypes Linkage;
bool isConstant;
- if (isUTF16) {
- // FIXME: why do utf strings get "_" labels instead of "L" labels?
- Linkage = llvm::GlobalValue::InternalLinkage;
- // Note: -fwritable-strings doesn't make unicode NSStrings writable, but
- // does make plain ascii ones writable.
- isConstant = true;
- } else {
- Linkage = llvm::GlobalValue::PrivateLinkage;
- isConstant = !Features.WritableStrings;
- }
+ Linkage = llvm::GlobalValue::PrivateLinkage;
+ isConstant = !Features.WritableStrings;
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(getModule(), C->getType(), isConstant, Linkage, C,
".str");
GV->setUnnamedAddr(true);
- if (isUTF16) {
- CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy);
- GV->setAlignment(Align.getQuantity());
- } else {
- CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy);
- GV->setAlignment(Align.getQuantity());
- }
+ CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy);
+ GV->setAlignment(Align.getQuantity());
Fields[1] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
// String length.
@@ -1877,6 +1899,7 @@ static llvm::Constant *GenerateStringLiteral(llvm::StringRef str,
new llvm::GlobalVariable(CGM.getModule(), C->getType(), constant,
llvm::GlobalValue::PrivateLinkage,
C, GlobalName);
+ GV->setAlignment(1);
GV->setUnnamedAddr(true);
return GV;
}
@@ -2057,7 +2080,9 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::UsingDirective:
case Decl::ClassTemplate:
case Decl::FunctionTemplate:
+ case Decl::TypeAliasTemplate:
case Decl::NamespaceAlias:
+ case Decl::Block:
break;
case Decl::CXXConstructor:
// Skip function templates
@@ -2216,6 +2241,23 @@ void CodeGenFunction::EmitDeclMetadata() {
}
}
+void CodeGenModule::EmitCoverageFile() {
+ if (!getCodeGenOpts().CoverageFile.empty()) {
+ if (llvm::NamedMDNode *CUNode = TheModule.getNamedMetadata("llvm.dbg.cu")) {
+ llvm::NamedMDNode *GCov = TheModule.getOrInsertNamedMetadata("llvm.gcov");
+ llvm::LLVMContext &Ctx = TheModule.getContext();
+ llvm::MDString *CoverageFile =
+ llvm::MDString::get(Ctx, getCodeGenOpts().CoverageFile);
+ for (int i = 0, e = CUNode->getNumOperands(); i != e; ++i) {
+ llvm::MDNode *CU = CUNode->getOperand(i);
+ llvm::Value *node[] = { CoverageFile, CU };
+ llvm::MDNode *N = llvm::MDNode::get(Ctx, node);
+ GCov->addOperand(N);
+ }
+ }
+ }
+}
+
///@name Custom Runtime Function Interfaces
///@{
//
@@ -2234,14 +2276,11 @@ llvm::Constant *CodeGenModule::getBlockObjectDispose() {
}
// Otherwise construct the function by hand.
- const llvm::FunctionType *FTy;
- std::vector<const llvm::Type*> ArgTys;
- const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
- ArgTys.push_back(Int8PtrTy);
- ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
- FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
+ const llvm::Type *args[] = { Int8PtrTy, Int32Ty };
+ const llvm::FunctionType *fty
+ = llvm::FunctionType::get(VoidTy, args, false);
return BlockObjectDispose =
- CreateRuntimeFunction(FTy, "_Block_object_dispose");
+ CreateRuntimeFunction(fty, "_Block_object_dispose");
}
llvm::Constant *CodeGenModule::getBlockObjectAssign() {
@@ -2256,15 +2295,11 @@ llvm::Constant *CodeGenModule::getBlockObjectAssign() {
}
// Otherwise construct the function by hand.
- const llvm::FunctionType *FTy;
- std::vector<const llvm::Type*> ArgTys;
- const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
- ArgTys.push_back(Int8PtrTy);
- ArgTys.push_back(Int8PtrTy);
- ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
- FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
+ const llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, Int32Ty };
+ const llvm::FunctionType *fty
+ = llvm::FunctionType::get(VoidTy, args, false);
return BlockObjectAssign =
- CreateRuntimeFunction(FTy, "_Block_object_assign");
+ CreateRuntimeFunction(fty, "_Block_object_assign");
}
llvm::Constant *CodeGenModule::getNSConcreteGlobalBlock() {
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index 99c973c..779a352 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -97,16 +97,20 @@ namespace CodeGen {
};
struct CodeGenTypeCache {
+ /// void
+ const llvm::Type *VoidTy;
+
/// i8, i32, and i64
const llvm::IntegerType *Int8Ty, *Int32Ty, *Int64Ty;
/// int
const llvm::IntegerType *IntTy;
- /// intptr_t and size_t, which we assume are the same
+ /// intptr_t, size_t, and ptrdiff_t, which we assume are the same size.
union {
const llvm::IntegerType *IntPtrTy;
const llvm::IntegerType *SizeTy;
+ const llvm::IntegerType *PtrDiffTy;
};
/// void* in address space 0
@@ -735,6 +739,10 @@ private:
void EmitDeclMetadata();
+ /// EmitCoverageFile - Emit the llvm.gcov metadata used to tell LLVM where
+ /// to emit the .gcno and .gcda files in a way that persists in .bc files.
+ void EmitCoverageFile();
+
/// MayDeferGeneration - Determine if the given decl can be emitted
/// lazily; this is only relevant for definitions. The given decl
/// must be either a function or var decl.
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index dc383cb..ff1eb4c 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -220,8 +220,9 @@ public: // These are internal details of CGT that shouldn't be used externally.
/// GetExpandedTypes - Expand the type \arg Ty into the LLVM
/// argument types it would be passed as on the provided vector \arg
/// ArgTys. See ABIArgInfo::Expand.
- void GetExpandedTypes(QualType Ty, std::vector<const llvm::Type*> &ArgTys,
- bool IsRecursive);
+ void GetExpandedTypes(QualType type,
+ llvm::SmallVectorImpl<const llvm::Type*> &expanded,
+ bool isRecursive);
/// IsZeroInitializable - Return whether a type can be
/// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index 33abf3a..12ef9bd 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -1007,11 +1007,9 @@ void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
const llvm::PointerType *GuardPtrTy) {
// int __cxa_guard_acquire(__guard *guard_object);
-
- std::vector<const llvm::Type*> Args(1, GuardPtrTy);
const llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
- Args, /*isVarArg=*/false);
+ GuardPtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire");
}
@@ -1019,12 +1017,9 @@ static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
const llvm::PointerType *GuardPtrTy) {
// void __cxa_guard_release(__guard *guard_object);
-
- std::vector<const llvm::Type*> Args(1, GuardPtrTy);
-
const llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- Args, /*isVarArg=*/false);
+ GuardPtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release");
}
@@ -1032,12 +1027,9 @@ static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
/// Get (or lazily declare) the runtime function
///   void __cxa_guard_abort(__guard *guard_object);
/// called when initialization of a guarded static terminates by exception.
static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
                                       const llvm::PointerType *GuardPtrTy) {
  const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGM.getLLVMContext());
  const llvm::FunctionType *FTy =
      llvm::FunctionType::get(VoidTy, GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort");
}
diff --git a/lib/CodeGen/ModuleBuilder.cpp b/lib/CodeGen/ModuleBuilder.cpp
index 8945028..4a2c4abbe 100644
--- a/lib/CodeGen/ModuleBuilder.cpp
+++ b/lib/CodeGen/ModuleBuilder.cpp
@@ -79,7 +79,7 @@ namespace {
MEnd = D->decls_end();
M != MEnd; ++M)
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(*M))
- if (Method->isThisDeclarationADefinition() &&
+ if (Method->doesThisDeclarationHaveABody() &&
(Method->hasAttr<UsedAttr>() ||
Method->hasAttr<ConstructorAttr>()))
Builder->EmitTopLevelDecl(Method);
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index bc2472c..043ead7 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -648,7 +648,7 @@ ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
if (StackAlign == 0)
- return ABIArgInfo::getIndirect(0);
+ return ABIArgInfo::getIndirect(4);
// If the stack alignment is less than the type alignment, realign the
// argument.
@@ -1315,13 +1315,10 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const {
if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
- // Compute the byval alignment. We trust the back-end to honor the
- // minimum ABI alignment for byval, to make cleaner IR.
- const unsigned MinABIAlign = 8;
- unsigned Align = getContext().getTypeAlign(Ty) / 8;
- if (Align > MinABIAlign)
- return ABIArgInfo::getIndirect(Align);
- return ABIArgInfo::getIndirect(0);
+ // Compute the byval alignment. We specify the alignment of the byval in all
+ // cases so that the mid-level optimizer knows the alignment of the byval.
+ unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
+ return ABIArgInfo::getIndirect(Align);
}
/// Get16ByteVectorType - The ABI specifies that a value should be passed in an
@@ -2279,6 +2276,22 @@ public:
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
return 13;
}
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+ llvm::LLVMContext &Context = CGF.getLLVMContext();
+
+ const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+
+ // 0-15 are the 16 integer registers.
+ AssignToArrayRange(Builder, Address, Four8, 0, 15);
+
+ return false;
+ }
+
+
};
}
@@ -2845,10 +2858,21 @@ void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
//===----------------------------------------------------------------------===//
namespace {
+class MipsABIInfo : public ABIInfo {
+public:
+ MipsABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
public:
MIPSTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
+ : TargetCodeGenInfo(new MipsABIInfo(CGT)) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
return 29;
@@ -2859,6 +2883,54 @@ public:
};
}
+ABIArgInfo MipsABIInfo::classifyArgumentType(QualType Ty) const {
+ if (isAggregateTypeForABI(Ty)) {
+ // Ignore empty aggregates.
+ if (getContext().getTypeSize(Ty) == 0)
+ return ABIArgInfo::getIgnore();
+
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy)) {
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+}
+
+llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ return 0;
+}
+
bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
OpenPOWER on IntegriCloud