Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/ABIInfo.h | 12
-rw-r--r--  lib/CodeGen/BackendUtil.cpp | 46
-rw-r--r--  lib/CodeGen/CGBlocks.cpp | 316
-rw-r--r--  lib/CodeGen/CGBlocks.h | 3
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp | 348
-rw-r--r--  lib/CodeGen/CGCXXABI.cpp | 2
-rw-r--r--  lib/CodeGen/CGCXXABI.h | 2
-rw-r--r--  lib/CodeGen/CGCall.cpp | 555
-rw-r--r--  lib/CodeGen/CGCall.h | 47
-rw-r--r--  lib/CodeGen/CGClass.cpp | 390
-rw-r--r--  lib/CodeGen/CGCleanup.cpp | 35
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp | 51
-rw-r--r--  lib/CodeGen/CGDebugInfo.h | 8
-rw-r--r--  lib/CodeGen/CGDecl.cpp | 770
-rw-r--r--  lib/CodeGen/CGDeclCXX.cpp | 162
-rw-r--r--  lib/CodeGen/CGException.cpp | 212
-rw-r--r--  lib/CodeGen/CGExpr.cpp | 450
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp | 319
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp | 147
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp | 93
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp | 42
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp | 494
-rw-r--r--  lib/CodeGen/CGObjC.cpp | 1255
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp | 224
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp | 791
-rw-r--r--  lib/CodeGen/CGObjCRuntime.cpp | 28
-rw-r--r--  lib/CodeGen/CGObjCRuntime.h | 8
-rw-r--r--  lib/CodeGen/CGRTTI.cpp | 4
-rw-r--r--  lib/CodeGen/CGRecordLayout.h | 16
-rw-r--r--  lib/CodeGen/CGRecordLayoutBuilder.cpp | 69
-rw-r--r--  lib/CodeGen/CGStmt.cpp | 32
-rw-r--r--  lib/CodeGen/CGTemporaries.cpp | 9
-rw-r--r--  lib/CodeGen/CGVTT.cpp | 3
-rw-r--r--  lib/CodeGen/CGVTables.cpp | 5
-rw-r--r--  lib/CodeGen/CGVTables.h | 2
-rw-r--r--  lib/CodeGen/CGValue.h | 81
-rw-r--r--  lib/CodeGen/CodeGenAction.cpp | 12
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp | 262
-rw-r--r--  lib/CodeGen/CodeGenFunction.h | 414
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp | 58
-rw-r--r--  lib/CodeGen/CodeGenModule.h | 125
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp | 617
-rw-r--r--  lib/CodeGen/CodeGenTypes.h | 130
-rw-r--r--  lib/CodeGen/GlobalDecl.h | 127
-rw-r--r--  lib/CodeGen/ItaniumCXXABI.cpp | 123
-rw-r--r--  lib/CodeGen/TargetInfo.cpp | 274
-rw-r--r--  lib/CodeGen/TargetInfo.h | 20
47 files changed, 6320 insertions(+), 2873 deletions(-)
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index ce10398..1381238 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -68,22 +68,22 @@ namespace clang {
private:
Kind TheKind;
- llvm::PATypeHolder TypeData;
+ llvm::Type *TypeData;
unsigned UIntData;
bool BoolData0;
bool BoolData1;
- ABIArgInfo(Kind K, const llvm::Type *TD=0,
+ ABIArgInfo(Kind K, llvm::Type *TD=0,
unsigned UI=0, bool B0 = false, bool B1 = false)
: TheKind(K), TypeData(TD), UIntData(UI), BoolData0(B0), BoolData1(B1) {}
public:
ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {}
- static ABIArgInfo getDirect(const llvm::Type *T = 0, unsigned Offset = 0) {
+ static ABIArgInfo getDirect(llvm::Type *T = 0, unsigned Offset = 0) {
return ABIArgInfo(Direct, T, Offset);
}
- static ABIArgInfo getExtend(const llvm::Type *T = 0) {
+ static ABIArgInfo getExtend(llvm::Type *T = 0) {
return ABIArgInfo(Extend, T, 0);
}
static ABIArgInfo getIgnore() {
@@ -113,12 +113,12 @@ namespace clang {
assert((isDirect() || isExtend()) && "Not a direct or extend kind");
return UIntData;
}
- const llvm::Type *getCoerceToType() const {
+ llvm::Type *getCoerceToType() const {
assert(canHaveCoerceToType() && "Invalid kind!");
return TypeData;
}
- void setCoerceToType(const llvm::Type *T) {
+ void setCoerceToType(llvm::Type *T) {
assert(canHaveCoerceToType() && "Invalid kind!");
TypeData = T;
}
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index 01d15ff..85f42db 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -10,6 +10,7 @@
#include "clang/CodeGen/BackendUtil.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/TargetOptions.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/Module.h"
@@ -18,13 +19,13 @@
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/PassManagerBuilder.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/SubtargetFeature.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
@@ -39,6 +40,7 @@ class EmitAssemblyHelper {
Diagnostic &Diags;
const CodeGenOptions &CodeGenOpts;
const TargetOptions &TargetOpts;
+ const LangOptions &LangOpts;
Module *TheModule;
Timer CodeGenerationTime;
@@ -82,8 +84,9 @@ private:
public:
EmitAssemblyHelper(Diagnostic &_Diags,
const CodeGenOptions &CGOpts, const TargetOptions &TOpts,
+ const LangOptions &LOpts,
Module *M)
- : Diags(_Diags), CodeGenOpts(CGOpts), TargetOpts(TOpts),
+ : Diags(_Diags), CodeGenOpts(CGOpts), TargetOpts(TOpts), LangOpts(LOpts),
TheModule(M), CodeGenerationTime("Code Generation Time"),
CodeGenPasses(0), PerModulePasses(0), PerFunctionPasses(0) {}
@@ -98,6 +101,16 @@ public:
}
+static void addObjCARCExpandPass(const PassManagerBuilder &Builder, PassManagerBase &PM) {
+ if (Builder.OptLevel > 0)
+ PM.add(createObjCARCExpandPass());
+}
+
+static void addObjCARCOptPass(const PassManagerBuilder &Builder, PassManagerBase &PM) {
+ if (Builder.OptLevel > 0)
+ PM.add(createObjCARCOptPass());
+}
+
void EmitAssemblyHelper::CreatePasses() {
unsigned OptLevel = CodeGenOpts.OptimizationLevel;
CodeGenOptions::InliningMethod Inlining = CodeGenOpts.Inlining;
@@ -116,6 +129,14 @@ void EmitAssemblyHelper::CreatePasses() {
PMBuilder.DisableSimplifyLibCalls = !CodeGenOpts.SimplifyLibCalls;
PMBuilder.DisableUnitAtATime = !CodeGenOpts.UnitAtATime;
PMBuilder.DisableUnrollLoops = !CodeGenOpts.UnrollLoops;
+
+ // In ObjC ARC mode, add the main ARC optimization passes.
+ if (LangOpts.ObjCAutoRefCount) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
+ addObjCARCExpandPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
+ addObjCARCOptPass);
+ }
// Figure out TargetLibraryInfo.
Triple TargetTriple(TheModule->getTargetTriple());
@@ -258,16 +279,16 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
const_cast<char **>(&BackendArgs[0]));
std::string FeaturesStr;
- if (TargetOpts.CPU.size() || TargetOpts.Features.size()) {
+ if (TargetOpts.Features.size()) {
SubtargetFeatures Features;
- Features.setCPU(TargetOpts.CPU);
for (std::vector<std::string>::const_iterator
it = TargetOpts.Features.begin(),
ie = TargetOpts.Features.end(); it != ie; ++it)
Features.AddFeature(*it);
FeaturesStr = Features.getString();
}
- TargetMachine *TM = TheTarget->createTargetMachine(Triple, FeaturesStr);
+ TargetMachine *TM = TheTarget->createTargetMachine(Triple, TargetOpts.CPU,
+ FeaturesStr);
if (CodeGenOpts.RelaxAll)
TM->setMCRelaxAll(true);
@@ -275,6 +296,8 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
TM->setMCSaveTempLabels(true);
if (CodeGenOpts.NoDwarf2CFIAsm)
TM->setMCUseCFI(false);
+ if (CodeGenOpts.NoExecStack)
+ TM->setMCNoExecStack(true);
// Create the code generator passes.
PassManager *PM = getCodeGenPasses();
@@ -295,6 +318,13 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
CGFT = TargetMachine::CGFT_Null;
else
assert(Action == Backend_EmitAssembly && "Invalid action!");
+
+ // Add ObjC ARC final-cleanup optimizations. This is done as part of the
+ // "codegen" passes so that it isn't run multiple times when there is
+ // inlining happening.
+ if (LangOpts.ObjCAutoRefCount)
+ PM->add(createObjCARCContractPass());
+
if (TM->addPassesToEmitFile(*PM, OS, CGFT, OptLevel,
/*DisableVerify=*/!CodeGenOpts.VerifyModule)) {
Diags.Report(diag::err_fe_unable_to_interface_with_target);
@@ -357,9 +387,11 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_ostream *OS) {
}
void clang::EmitBackendOutput(Diagnostic &Diags, const CodeGenOptions &CGOpts,
- const TargetOptions &TOpts, Module *M,
+ const TargetOptions &TOpts,
+ const LangOptions &LOpts,
+ Module *M,
BackendAction Action, raw_ostream *OS) {
- EmitAssemblyHelper AsmHelper(Diags, CGOpts, TOpts, M);
+ EmitAssemblyHelper AsmHelper(Diags, CGOpts, TOpts, LOpts, M);
AsmHelper.EmitAssembly(Action, OS);
}
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index e5da703..9815d1d 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -95,9 +95,7 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
else
elements.push_back(llvm::Constant::getNullValue(i8p));
- llvm::Constant *init =
- llvm::ConstantStruct::get(CGM.getLLVMContext(), elements.data(),
- elements.size(), false);
+ llvm::Constant *init = llvm::ConstantStruct::getAnon(elements);
llvm::GlobalVariable *global =
new llvm::GlobalVariable(CGM.getModule(), init->getType(), true,
@@ -166,11 +164,11 @@ namespace {
CharUnits Alignment;
CharUnits Size;
const BlockDecl::Capture *Capture; // null for 'this'
- const llvm::Type *Type;
+ llvm::Type *Type;
BlockLayoutChunk(CharUnits align, CharUnits size,
const BlockDecl::Capture *capture,
- const llvm::Type *type)
+ llvm::Type *type)
: Alignment(align), Size(size), Capture(capture), Type(type) {}
/// Tell the block info that this chunk has the given field index.
@@ -245,7 +243,7 @@ static CharUnits getLowBit(CharUnits v) {
}
static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
- llvm::SmallVectorImpl<const llvm::Type*> &elementTypes) {
+ llvm::SmallVectorImpl<llvm::Type*> &elementTypes) {
ASTContext &C = CGM.getContext();
// The header is basically a 'struct { void *; int; int; void *; void *; }'.
@@ -265,8 +263,8 @@ static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
info.BlockSize = headerSize;
assert(elementTypes.empty());
- const llvm::Type *i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
- const llvm::Type *intTy = CGM.getTypes().ConvertType(C.IntTy);
+ llvm::Type *i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
+ llvm::Type *intTy = CGM.getTypes().ConvertType(C.IntTy);
elementTypes.push_back(i8p);
elementTypes.push_back(intTy);
elementTypes.push_back(intTy);
@@ -282,7 +280,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
ASTContext &C = CGM.getContext();
const BlockDecl *block = info.getBlockDecl();
- llvm::SmallVector<const llvm::Type*, 8> elementTypes;
+ llvm::SmallVector<llvm::Type*, 8> elementTypes;
initializeForBlockHeader(CGM, info, elementTypes);
if (!block->hasCaptures()) {
@@ -310,7 +308,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
else
thisType = cast<CXXMethodDecl>(DC)->getThisType(C);
- const llvm::Type *llvmType = CGM.getTypes().ConvertType(thisType);
+ llvm::Type *llvmType = CGM.getTypes().ConvertType(thisType);
std::pair<CharUnits,CharUnits> tinfo
= CGM.getContext().getTypeInfoInChars(thisType);
maxFieldAlign = std::max(maxFieldAlign, tinfo.second);
@@ -330,7 +328,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
// Just use void* instead of a pointer to the byref type.
QualType byRefPtrTy = C.VoidPtrTy;
- const llvm::Type *llvmType = CGM.getTypes().ConvertType(byRefPtrTy);
+ llvm::Type *llvmType = CGM.getTypes().ConvertType(byRefPtrTy);
std::pair<CharUnits,CharUnits> tinfo
= CGM.getContext().getTypeInfoInChars(byRefPtrTy);
maxFieldAlign = std::max(maxFieldAlign, tinfo.second);
@@ -347,13 +345,23 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
continue;
}
- // Block pointers require copy/dispose.
- if (variable->getType()->isBlockPointerType()) {
- info.NeedsCopyDispose = true;
+ // If we have a lifetime qualifier, honor it for capture purposes.
+ // That includes *not* copying it if it's __unsafe_unretained.
+ if (Qualifiers::ObjCLifetime lifetime
+ = variable->getType().getObjCLifetime()) {
+ switch (lifetime) {
+ case Qualifiers::OCL_None: llvm_unreachable("impossible");
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ info.NeedsCopyDispose = true;
+ }
- // So do Objective-C pointers.
- } else if (variable->getType()->isObjCObjectPointerType() ||
- C.isObjCNSObjectType(variable->getType())) {
+ // Block pointers require copy/dispose. So do Objective-C pointers.
+ } else if (variable->getType()->isObjCRetainableType()) {
info.NeedsCopyDispose = true;
// So do types that require non-trivial copy construction.
@@ -376,7 +384,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
CharUnits align = C.getDeclAlign(variable);
maxFieldAlign = std::max(maxFieldAlign, align);
- const llvm::Type *llvmType =
+ llvm::Type *llvmType =
CGM.getTypes().ConvertTypeForMem(variable->getType());
layout.push_back(BlockLayoutChunk(align, size, &*ci, llvmType));
@@ -591,6 +599,11 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
// Otherwise, fake up a POD copy into the block field.
} else {
+ // Fake up a new variable so that EmitScalarInit doesn't think
+ // we're referring to the variable in its own initializer.
+ ImplicitParamDecl blockFieldPseudoVar(/*DC*/ 0, SourceLocation(),
+ /*name*/ 0, type);
+
// We use one of these or the other depending on whether the
// reference is nested.
DeclRefExpr notNested(const_cast<VarDecl*>(variable), type, VK_LValue,
@@ -603,15 +616,37 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
ImplicitCastExpr l2r(ImplicitCastExpr::OnStack, type, CK_LValueToRValue,
declRef, VK_RValue);
- EmitExprAsInit(&l2r, variable, blockField,
- getContext().getDeclAlign(variable),
+ EmitExprAsInit(&l2r, &blockFieldPseudoVar,
+ LValue::MakeAddr(blockField, type,
+ getContext().getDeclAlign(variable)
+ .getQuantity(),
+ getContext()),
/*captured by init*/ false);
}
// Push a destructor if necessary. The semantics for when this
// actually gets run are really obscure.
- if (!ci->isByRef() && CGM.getLangOptions().CPlusPlus)
- PushDestructorCleanup(type, blockField);
+ if (!ci->isByRef()) {
+ switch (QualType::DestructionKind dtorKind = type.isDestructedType()) {
+ case QualType::DK_none:
+ break;
+
+ // Block captures count as local values and have imprecise semantics.
+ // They also can't be arrays, so need to worry about that.
+ case QualType::DK_objc_strong_lifetime: {
+ // This local is a GCC and MSVC compiler workaround.
+ Destroyer *destroyer = &destroyARCStrongImprecise;
+ pushDestroy(getCleanupKind(dtorKind), blockField, type,
+ *destroyer, /*useEHCleanupForArray*/ false);
+ break;
+ }
+
+ case QualType::DK_objc_weak_lifetime:
+ case QualType::DK_cxx_destructor:
+ pushDestroy(dtorKind, blockField, type);
+ break;
+ }
+ }
}
// Cast to the converted block-pointer type, which happens (somewhat
@@ -624,11 +659,11 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
}
-const llvm::Type *CodeGenModule::getBlockDescriptorType() {
+llvm::Type *CodeGenModule::getBlockDescriptorType() {
if (BlockDescriptorType)
return BlockDescriptorType;
- const llvm::Type *UnsignedLongTy =
+ llvm::Type *UnsignedLongTy =
getTypes().ConvertType(getContext().UnsignedLongTy);
// struct __block_descriptor {
@@ -645,24 +680,20 @@ const llvm::Type *CodeGenModule::getBlockDescriptorType() {
// const char *signature; // the block signature
// const char *layout; // reserved
// };
- BlockDescriptorType = llvm::StructType::get(UnsignedLongTy->getContext(),
- UnsignedLongTy,
- UnsignedLongTy,
- NULL);
-
- getModule().addTypeName("struct.__block_descriptor",
- BlockDescriptorType);
+ BlockDescriptorType =
+ llvm::StructType::createNamed("struct.__block_descriptor",
+ UnsignedLongTy, UnsignedLongTy, NULL);
// Now form a pointer to that.
BlockDescriptorType = llvm::PointerType::getUnqual(BlockDescriptorType);
return BlockDescriptorType;
}
-const llvm::Type *CodeGenModule::getGenericBlockLiteralType() {
+llvm::Type *CodeGenModule::getGenericBlockLiteralType() {
if (GenericBlockLiteralType)
return GenericBlockLiteralType;
- const llvm::Type *BlockDescPtrTy = getBlockDescriptorType();
+ llvm::Type *BlockDescPtrTy = getBlockDescriptorType();
// struct __block_literal_generic {
// void *__isa;
@@ -671,16 +702,14 @@ const llvm::Type *CodeGenModule::getGenericBlockLiteralType() {
// void (*__invoke)(void *);
// struct __block_descriptor *__descriptor;
// };
- GenericBlockLiteralType = llvm::StructType::get(getLLVMContext(),
- VoidPtrTy,
- IntTy,
- IntTy,
- VoidPtrTy,
- BlockDescPtrTy,
- NULL);
-
- getModule().addTypeName("struct.__block_literal_generic",
- GenericBlockLiteralType);
+ GenericBlockLiteralType =
+ llvm::StructType::createNamed("struct.__block_literal_generic",
+ VoidPtrTy,
+ IntTy,
+ IntTy,
+ VoidPtrTy,
+ BlockDescPtrTy,
+ NULL);
return GenericBlockLiteralType;
}
@@ -822,9 +851,7 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
// Descriptor
fields[4] = buildBlockDescriptor(CGM, blockInfo);
- llvm::Constant *init =
- llvm::ConstantStruct::get(CGM.getLLVMContext(), fields, BlockHeaderSize,
- /*packed*/ false);
+ llvm::Constant *init = llvm::ConstantStruct::getAnon(fields);
llvm::GlobalVariable *literal =
new llvm::GlobalVariable(CGM.getModule(),
@@ -1023,8 +1050,6 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
-
-
llvm::Constant *
CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
ASTContext &C = getContext();
@@ -1084,21 +1109,40 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
if (capture.isConstant()) continue;
const Expr *copyExpr = ci->getCopyExpr();
- unsigned flags = 0;
+ BlockFieldFlags flags;
+
+ bool isARCWeakCapture = false;
if (copyExpr) {
assert(!ci->isByRef());
// don't bother computing flags
+
} else if (ci->isByRef()) {
flags = BLOCK_FIELD_IS_BYREF;
- if (type.isObjCGCWeak()) flags |= BLOCK_FIELD_IS_WEAK;
- } else if (type->isBlockPointerType()) {
- flags = BLOCK_FIELD_IS_BLOCK;
- } else if (type->isObjCObjectPointerType() || C.isObjCNSObjectType(type)) {
+ if (type.isObjCGCWeak())
+ flags |= BLOCK_FIELD_IS_WEAK;
+
+ } else if (type->isObjCRetainableType()) {
flags = BLOCK_FIELD_IS_OBJECT;
- }
+ if (type->isBlockPointerType())
+ flags = BLOCK_FIELD_IS_BLOCK;
+
+ // Special rules for ARC captures:
+ if (getLangOptions().ObjCAutoRefCount) {
+ Qualifiers qs = type.getQualifiers();
- if (!copyExpr && !flags) continue;
+ // Don't generate special copy logic for a captured object
+ // unless it's __strong or __weak.
+ if (!qs.hasStrongOrWeakObjCLifetime())
+ continue;
+
+ // Support __weak direct captures.
+ if (qs.getObjCLifetime() == Qualifiers::OCL_Weak)
+ isARCWeakCapture = true;
+ }
+ } else {
+ continue;
+ }
unsigned index = capture.getIndex();
llvm::Value *srcField = Builder.CreateStructGEP(src, index);
@@ -1107,12 +1151,14 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
// If there's an explicit copy expression, we do that.
if (copyExpr) {
EmitSynthesizedCXXCopyCtor(dstField, srcField, copyExpr);
+ } else if (isARCWeakCapture) {
+ EmitARCCopyWeak(dstField, srcField);
} else {
llvm::Value *srcValue = Builder.CreateLoad(srcField, "blockcopy.src");
srcValue = Builder.CreateBitCast(srcValue, VoidPtrTy);
llvm::Value *dstAddr = Builder.CreateBitCast(dstField, VoidPtrTy);
Builder.CreateCall3(CGM.getBlockObjectAssign(), dstAddr, srcValue,
- llvm::ConstantInt::get(Int32Ty, flags));
+ llvm::ConstantInt::get(Int32Ty, flags.getBitMask()));
}
}
@@ -1176,20 +1222,37 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
BlockFieldFlags flags;
const CXXDestructorDecl *dtor = 0;
+ bool isARCWeakCapture = false;
+
if (ci->isByRef()) {
flags = BLOCK_FIELD_IS_BYREF;
- if (type.isObjCGCWeak()) flags |= BLOCK_FIELD_IS_WEAK;
- } else if (type->isBlockPointerType()) {
- flags = BLOCK_FIELD_IS_BLOCK;
- } else if (type->isObjCObjectPointerType() || C.isObjCNSObjectType(type)) {
+ if (type.isObjCGCWeak())
+ flags |= BLOCK_FIELD_IS_WEAK;
+ } else if (const CXXRecordDecl *record = type->getAsCXXRecordDecl()) {
+ if (record->hasTrivialDestructor())
+ continue;
+ dtor = record->getDestructor();
+ } else if (type->isObjCRetainableType()) {
flags = BLOCK_FIELD_IS_OBJECT;
- } else if (C.getLangOptions().CPlusPlus) {
- if (const CXXRecordDecl *record = type->getAsCXXRecordDecl())
- if (!record->hasTrivialDestructor())
- dtor = record->getDestructor();
- }
+ if (type->isBlockPointerType())
+ flags = BLOCK_FIELD_IS_BLOCK;
- if (!dtor && flags.empty()) continue;
+ // Special rules for ARC captures.
+ if (getLangOptions().ObjCAutoRefCount) {
+ Qualifiers qs = type.getQualifiers();
+
+ // Don't generate special dispose logic for a captured object
+ // unless it's __strong or __weak.
+ if (!qs.hasStrongOrWeakObjCLifetime())
+ continue;
+
+ // Support __weak direct captures.
+ if (qs.getObjCLifetime() == Qualifiers::OCL_Weak)
+ isARCWeakCapture = true;
+ }
+ } else {
+ continue;
+ }
unsigned index = capture.getIndex();
llvm::Value *srcField = Builder.CreateStructGEP(src, index);
@@ -1198,6 +1261,10 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
if (dtor) {
PushDestructorCleanup(dtor, srcField);
+ // If this is a __weak capture, emit the release directly.
+ } else if (isARCWeakCapture) {
+ EmitARCDestroyWeak(srcField);
+
// Otherwise we call _Block_object_dispose. It wouldn't be too
// hard to just emit this as a cleanup if we wanted to make sure
// that things were done in reverse.
@@ -1251,6 +1318,55 @@ public:
}
};
+/// Emits the copy/dispose helpers for an ARC __block __weak variable.
+class ARCWeakByrefHelpers : public CodeGenModule::ByrefHelpers {
+public:
+ ARCWeakByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
+
+ void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
+ llvm::Value *srcField) {
+ CGF.EmitARCMoveWeak(destField, srcField);
+ }
+
+ void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+ CGF.EmitARCDestroyWeak(field);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const {
+ // 0 is distinguishable from all pointers and byref flags
+ id.AddInteger(0);
+ }
+};
+
+/// Emits the copy/dispose helpers for an ARC __block __strong variable
+/// that's not of block-pointer type.
+class ARCStrongByrefHelpers : public CodeGenModule::ByrefHelpers {
+public:
+ ARCStrongByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
+
+ void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
+ llvm::Value *srcField) {
+ // Do a "move" by copying the value and then zeroing out the old
+ // variable.
+
+ llvm::Value *value = CGF.Builder.CreateLoad(srcField);
+ llvm::Value *null =
+ llvm::ConstantPointerNull::get(cast<llvm::PointerType>(value->getType()));
+ CGF.Builder.CreateStore(value, destField);
+ CGF.Builder.CreateStore(null, srcField);
+ }
+
+ void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+ llvm::Value *value = CGF.Builder.CreateLoad(field);
+ CGF.EmitARCRelease(value, /*precise*/ false);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const {
+ // 1 is distinguishable from all pointers and byref flags
+ id.AddInteger(1);
+ }
+};
+
/// Emits the copy/dispose helpers for a __block variable with a
/// nontrivial copy constructor or destructor.
class CXXByrefHelpers : public CodeGenModule::ByrefHelpers {
@@ -1318,6 +1434,7 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
SC_Static,
SC_None,
false, true);
+
CGF.StartFunction(FD, R, Fn, FI, args, SourceLocation());
if (byrefInfo.needsCopy()) {
@@ -1449,6 +1566,52 @@ CodeGenFunction::buildByrefHelpers(const llvm::StructType &byrefType,
return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
}
+ // Otherwise, if we don't have a retainable type, there's nothing to do.
+ // that the runtime does extra copies.
+ if (!type->isObjCRetainableType()) return 0;
+
+ Qualifiers qs = type.getQualifiers();
+
+ // If we have lifetime, that dominates.
+ if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
+ assert(getLangOptions().ObjCAutoRefCount);
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None: llvm_unreachable("impossible");
+
+ // These are just bits as far as the runtime is concerned.
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ return 0;
+
+ // Tell the runtime that this is ARC __weak, called by the
+ // byref routines.
+ case Qualifiers::OCL_Weak: {
+ ARCWeakByrefHelpers byrefInfo(emission.Alignment);
+ return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
+ }
+
+ // ARC __strong __block variables need to be retained.
+ case Qualifiers::OCL_Strong:
+ // Block-pointers need to be _Block_copy'ed, so we let the
+ // runtime be in charge. But we can't use the code below
+ // because we don't want to set BYREF_CALLER, which will
+ // just make the runtime ignore us.
+ if (type->isBlockPointerType()) {
+ BlockFieldFlags flags = BLOCK_FIELD_IS_BLOCK;
+ ObjectByrefHelpers byrefInfo(emission.Alignment, flags);
+ return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
+
+ // Otherwise, we transfer ownership of the retain from the stack
+ // to the heap.
+ } else {
+ ARCStrongByrefHelpers byrefInfo(emission.Alignment);
+ return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
+ }
+ }
+ llvm_unreachable("fell out of lifetime switch!");
+ }
+
BlockFieldFlags flags;
if (type->isBlockPointerType()) {
flags |= BLOCK_FIELD_IS_BLOCK;
@@ -1502,15 +1665,17 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
QualType Ty = D->getType();
- llvm::SmallVector<const llvm::Type *, 8> types;
+ llvm::SmallVector<llvm::Type *, 8> types;
- llvm::PATypeHolder ByRefTypeHolder = llvm::OpaqueType::get(getLLVMContext());
+ llvm::StructType *ByRefType =
+ llvm::StructType::createNamed(getLLVMContext(),
+ "struct.__block_byref_" + D->getNameAsString());
// void *__isa;
types.push_back(Int8PtrTy);
// void *__forwarding;
- types.push_back(llvm::PointerType::getUnqual(ByRefTypeHolder));
+ types.push_back(llvm::PointerType::getUnqual(ByRefType));
// int32_t __flags;
types.push_back(Int32Ty);
@@ -1545,7 +1710,7 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes;
if (NumPaddingBytes > 0) {
- const llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext());
+ llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext());
// FIXME: We need a sema error for alignment larger than the minimum of
// the maximal stack alignment and the alignment of malloc on the system.
if (NumPaddingBytes > 1)
@@ -1561,13 +1726,9 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
// T x;
types.push_back(ConvertTypeForMem(Ty));
- const llvm::Type *T = llvm::StructType::get(getLLVMContext(), types, Packed);
-
- cast<llvm::OpaqueType>(ByRefTypeHolder.get())->refineAbstractTypeTo(T);
- CGM.getModule().addTypeName("struct.__block_byref_" + D->getNameAsString(),
- ByRefTypeHolder.get());
+ ByRefType->setBody(types, Packed);
- Info.first = ByRefTypeHolder.get();
+ Info.first = ByRefType;
Info.second = types.size() - 1;
@@ -1638,7 +1799,8 @@ namespace {
llvm::Value *Addr;
CallBlockRelease(llvm::Value *Addr) : Addr(Addr) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ // Should we be passing FIELD_IS_WEAK here?
CGF.BuildBlockRelease(Addr, BLOCK_FIELD_IS_BYREF);
}
};
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 9bd18e5..4d8dead 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -17,7 +17,6 @@
#include "CodeGenTypes.h"
#include "clang/AST/Type.h"
#include "llvm/Module.h"
-#include "llvm/ADT/SmallVector.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Expr.h"
@@ -89,7 +88,7 @@ enum BlockFieldFlag_t {
variable */
BLOCK_FIELD_IS_WEAK = 0x10, /* declared __weak, only used in byref copy
helpers */
-
+ BLOCK_FIELD_IS_ARC = 0x40, /* field has ARC-specific semantics */
BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
support routines */
BLOCK_BYREF_CURRENT_MAX = 256
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 14bebaf..1566bd9 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -22,6 +22,7 @@
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/Intrinsics.h"
#include "llvm/Target/TargetData.h"
+
using namespace clang;
using namespace CodeGen;
using namespace llvm;
@@ -37,8 +38,7 @@ static void EmitMemoryBarrier(CodeGenFunction &CGF,
StoreLoad ? True : False,
StoreStore ? True : False,
Device ? True : False };
- CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::memory_barrier),
- C, C + 5);
+ CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::memory_barrier), C);
}
/// Emit the conversions required to turn the given value into an
@@ -68,14 +68,14 @@ static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
// The atomic builtins are also full memory barriers. This is a utility for
// wrapping a call to the builtins with memory barriers.
static Value *EmitCallWithBarrier(CodeGenFunction &CGF, Value *Fn,
- Value **ArgBegin, Value **ArgEnd) {
+ ArrayRef<Value *> Args) {
// FIXME: We need a target hook for whether this applies to device memory or
// not.
bool Device = true;
// Create barriers both before and after the call.
EmitMemoryBarrier(CGF, true, true, true, true, Device);
- Value *Result = CGF.Builder.CreateCall(Fn, ArgBegin, ArgEnd);
+ Value *Result = CGF.Builder.CreateCall(Fn, Args);
EmitMemoryBarrier(CGF, true, true, true, true, Device);
return Result;
}
@@ -94,13 +94,13 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
unsigned AddrSpace =
cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
- const llvm::IntegerType *IntType =
+ llvm::IntegerType *IntType =
llvm::IntegerType::get(CGF.getLLVMContext(),
CGF.getContext().getTypeSize(T));
- const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+ llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
- const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
- llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);
+ llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
+ llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes);
llvm::Value *Args[2];
Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
@@ -108,7 +108,7 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
const llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
- llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
+ llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args);
Result = EmitFromInt(CGF, Result, T, ValueType);
return RValue::get(Result);
}
@@ -129,13 +129,13 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
unsigned AddrSpace =
cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
- const llvm::IntegerType *IntType =
+ llvm::IntegerType *IntType =
llvm::IntegerType::get(CGF.getLLVMContext(),
CGF.getContext().getTypeSize(T));
- const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+ llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
- const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
- llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);
+ llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
+ llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes);
llvm::Value *Args[2];
Args[1] = CGF.EmitScalarExpr(E->getArg(1));
@@ -143,7 +143,7 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
- llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
+ llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args);
Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
Result = EmitFromInt(CGF, Result, T, ValueType);
return RValue::get(Result);
@@ -164,7 +164,8 @@ static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
}
// The prototype is something that takes and returns whatever V's type is.
- llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(),
+ llvm::Type *ArgTys[] = { V->getType() };
+ llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), ArgTys,
false);
llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);
@@ -232,8 +233,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_ctzll: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
- const llvm::Type *ArgType = ArgValue->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
const llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
@@ -247,8 +248,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_clzll: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
- const llvm::Type *ArgType = ArgValue->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::ctlz, &ArgType, 1);
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
const llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
@@ -263,8 +264,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// ffs(x) -> x ? cttz(x) + 1 : 0
Value *ArgValue = EmitScalarExpr(E->getArg(0));
- const llvm::Type *ArgType = ArgValue->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
const llvm::Type *ResultType = ConvertType(E->getType());
Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
@@ -283,8 +284,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// parity(x) -> ctpop(x) & 1
Value *ArgValue = EmitScalarExpr(E->getArg(0));
- const llvm::Type *ArgType = ArgValue->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
const llvm::Type *ResultType = ConvertType(E->getType());
Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp");
@@ -300,8 +301,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_popcountll: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
- const llvm::Type *ArgType = ArgValue->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
const llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
@@ -311,25 +312,27 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Result);
}
case Builtin::BI__builtin_expect: {
- // FIXME: pass expect through to LLVM
Value *ArgValue = EmitScalarExpr(E->getArg(0));
- if (E->getArg(1)->HasSideEffects(getContext()))
- (void)EmitScalarExpr(E->getArg(1));
- return RValue::get(ArgValue);
+ llvm::Type *ArgType = ArgValue->getType();
+
+ Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
+ Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
+
+ Value *Result = Builder.CreateCall2(FnExpect, ArgValue, ExpectedValue,
+ "expval");
+ return RValue::get(Result);
}
case Builtin::BI__builtin_bswap32:
case Builtin::BI__builtin_bswap64: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
- const llvm::Type *ArgType = ArgValue->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::bswap, &ArgType, 1);
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::bswap, ArgType);
return RValue::get(Builder.CreateCall(F, ArgValue, "tmp"));
}
case Builtin::BI__builtin_object_size: {
// We pass this builtin onto the optimizer so that it can
// figure out the object size in more complex cases.
- const llvm::Type *ResType[] = {
- ConvertType(E->getType())
- };
+ llvm::Type *ResType = ConvertType(E->getType());
// LLVM only supports 0 and 2, make sure that we pass along that
// as a boolean.
@@ -339,7 +342,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
uint64_t val = CI->getZExtValue();
CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
- Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType, 1);
+ Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType);
return RValue::get(Builder.CreateCall2(F,
EmitScalarExpr(E->getArg(0)),
CI));
@@ -351,11 +354,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::ConstantInt::get(Int32Ty, 0);
Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
llvm::ConstantInt::get(Int32Ty, 3);
- Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0);
- return RValue::get(Builder.CreateCall3(F, Address, RW, Locality));
+ Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
+ Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
+ return RValue::get(Builder.CreateCall4(F, Address, RW, Locality, Data));
}
case Builtin::BI__builtin_trap: {
- Value *F = CGM.getIntrinsic(Intrinsic::trap, 0, 0);
+ Value *F = CGM.getIntrinsic(Intrinsic::trap);
return RValue::get(Builder.CreateCall(F));
}
case Builtin::BI__builtin_unreachable: {
@@ -375,8 +379,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_powil: {
Value *Base = EmitScalarExpr(E->getArg(0));
Value *Exponent = EmitScalarExpr(E->getArg(1));
- const llvm::Type *ArgType = Base->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::powi, &ArgType, 1);
+ llvm::Type *ArgType = Base->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
}
@@ -630,20 +634,20 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// this instead of hard-coding 0, which is correct for most targets.
int32_t Offset = 0;
- Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa, 0, 0);
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
return RValue::get(Builder.CreateCall(F,
llvm::ConstantInt::get(Int32Ty, Offset)));
}
case Builtin::BI__builtin_return_address: {
Value *Depth = EmitScalarExpr(E->getArg(0));
Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
- Value *F = CGM.getIntrinsic(Intrinsic::returnaddress, 0, 0);
+ Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
return RValue::get(Builder.CreateCall(F, Depth));
}
case Builtin::BI__builtin_frame_address: {
Value *Depth = EmitScalarExpr(E->getArg(0));
Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
- Value *F = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
+ Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
return RValue::get(Builder.CreateCall(F, Depth));
}
case Builtin::BI__builtin_extract_return_addr: {
@@ -681,8 +685,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
"LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
? Intrinsic::eh_return_i32
- : Intrinsic::eh_return_i64,
- 0, 0);
+ : Intrinsic::eh_return_i64);
Builder.CreateCall2(F, Int, Ptr);
Builder.CreateUnreachable();
@@ -692,7 +695,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(0);
}
case Builtin::BI__builtin_unwind_init: {
- Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init, 0, 0);
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
return RValue::get(Builder.CreateCall(F));
}
case Builtin::BI__builtin_extend_pointer: {
@@ -860,13 +863,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
unsigned AddrSpace =
cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
- const llvm::IntegerType *IntType =
+ llvm::IntegerType *IntType =
llvm::IntegerType::get(getLLVMContext(),
getContext().getTypeSize(T));
- const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
- const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
+ llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+ llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
- IntrinsicTypes, 2);
+ IntrinsicTypes);
Value *Args[3];
Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
@@ -875,7 +878,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Args[1] = EmitToInt(*this, Args[1], T, IntType);
Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
- Value *Result = EmitCallWithBarrier(*this, AtomF, Args, Args + 3);
+ Value *Result = EmitCallWithBarrier(*this, AtomF, Args);
Result = EmitFromInt(*this, Result, T, ValueType);
return RValue::get(Result);
}
@@ -890,13 +893,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
unsigned AddrSpace =
cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
- const llvm::IntegerType *IntType =
+ llvm::IntegerType *IntType =
llvm::IntegerType::get(getLLVMContext(),
getContext().getTypeSize(T));
- const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
- const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
+ llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+ llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
- IntrinsicTypes, 2);
+ IntrinsicTypes);
Value *Args[3];
Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
@@ -904,7 +907,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
Value *OldVal = Args[1];
- Value *PrevVal = EmitCallWithBarrier(*this, AtomF, Args, Args + 3);
+ Value *PrevVal = EmitCallWithBarrier(*this, AtomF, Args);
Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
// zext bool to int.
Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
@@ -953,7 +956,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
EmitScalarExpr(E->getArg(3)),
EmitScalarExpr(E->getArg(4))
};
- Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5);
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C);
return RValue::get(0);
}
@@ -977,11 +980,27 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
break;
Value *Base = EmitScalarExpr(E->getArg(0));
Value *Exponent = EmitScalarExpr(E->getArg(1));
- const llvm::Type *ArgType = Base->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::pow, &ArgType, 1);
+ llvm::Type *ArgType = Base->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType);
return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
}
+ case Builtin::BIfma:
+ case Builtin::BIfmaf:
+ case Builtin::BIfmal:
+ case Builtin::BI__builtin_fma:
+ case Builtin::BI__builtin_fmaf:
+ case Builtin::BI__builtin_fmal: {
+ // Rewrite fma to intrinsic.
+ Value *FirstArg = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgType = FirstArg->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType);
+ return RValue::get(Builder.CreateCall3(F, FirstArg,
+ EmitScalarExpr(E->getArg(1)),
+ EmitScalarExpr(E->getArg(2)),
+ "tmp"));
+ }
+
case Builtin::BI__builtin_signbit:
case Builtin::BI__builtin_signbitf:
case Builtin::BI__builtin_signbitl: {
@@ -1055,7 +1074,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Args.push_back(ArgValue);
}
- Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size());
+ Value *V = Builder.CreateCall(F, Args);
QualType BuiltinRetType = E->getType();
const llvm::Type *RetTy = llvm::Type::getVoidTy(getLLVMContext());
@@ -1099,8 +1118,7 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
}
}
-static const llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type,
- bool q) {
+static llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type, bool q) {
switch (type) {
default: break;
case 0:
@@ -1133,7 +1151,7 @@ Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
else
Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
- return Builder.CreateCall(F, Ops.begin(), Ops.end(), name);
+ return Builder.CreateCall(F, Ops, name);
}
Value *CodeGenFunction::EmitNeonShiftVector(Value *V, const llvm::Type *Ty,
@@ -1181,8 +1199,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
const llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
const llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
llvm::StringRef Name = FD->getName();
- return Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
- Ops.begin(), Ops.end());
+ return Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
}
if (BuiltinID == ARM::BI__builtin_arm_ldrexd) {
@@ -1203,8 +1220,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
if (BuiltinID == ARM::BI__builtin_arm_strexd) {
Function *F = CGM.getIntrinsic(Intrinsic::arm_strexd);
- llvm::Type *STy = llvm::StructType::get(getLLVMContext(), Int32Ty, Int32Ty,
- NULL);
+ llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, NULL);
Value *One = llvm::ConstantInt::get(Int32Ty, 1);
Value *Tmp = Builder.CreateAlloca(Int64Ty, One, "tmp");
@@ -1232,7 +1248,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
// Determine the overloaded type of this builtin.
- const llvm::Type *Ty;
+ llvm::Type *Ty;
if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
Ty = llvm::Type::getFloatTy(getLLVMContext());
else
@@ -1243,8 +1259,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
// Call the appropriate intrinsic.
- Function *F = CGM.getIntrinsic(Int, &Ty, 1);
- return Builder.CreateCall(F, Ops.begin(), Ops.end(), "vcvtr");
+ Function *F = CGM.getIntrinsic(Int, Ty);
+ return Builder.CreateCall(F, Ops, "vcvtr");
}
// Determine the type of this overloaded NEON intrinsic.
@@ -1255,8 +1271,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
(void)poly; // Only used in assert()s.
bool rightShift = false;
- const llvm::VectorType *VTy = GetNeonType(getLLVMContext(), type & 0x7, quad);
- const llvm::Type *Ty = VTy;
+ llvm::VectorType *VTy = GetNeonType(getLLVMContext(), type & 0x7, quad);
+ llvm::Type *Ty = VTy;
if (!Ty)
return 0;
@@ -1266,13 +1282,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vabd_v:
case ARM::BI__builtin_neon_vabdq_v:
Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vabd");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
case ARM::BI__builtin_neon_vabs_v:
case ARM::BI__builtin_neon_vabsq_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, Ty),
Ops, "vabs");
case ARM::BI__builtin_neon_vaddhn_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vaddhn, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vaddhn, Ty),
Ops, "vaddhn");
case ARM::BI__builtin_neon_vcale_v:
std::swap(Ops[0], Ops[1]);
@@ -1300,17 +1316,17 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
case ARM::BI__builtin_neon_vcls_v:
case ARM::BI__builtin_neon_vclsq_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcls, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcls, Ty);
return EmitNeonCall(F, Ops, "vcls");
}
case ARM::BI__builtin_neon_vclz_v:
case ARM::BI__builtin_neon_vclzq_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vclz, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vclz, Ty);
return EmitNeonCall(F, Ops, "vclz");
}
case ARM::BI__builtin_neon_vcnt_v:
case ARM::BI__builtin_neon_vcntq_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcnt, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcnt, Ty);
return EmitNeonCall(F, Ops, "vcnt");
}
case ARM::BI__builtin_neon_vcvt_f16_v: {
@@ -1340,18 +1356,18 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
case ARM::BI__builtin_neon_vcvt_n_f32_v:
case ARM::BI__builtin_neon_vcvtq_n_f32_v: {
- const llvm::Type *Tys[2] = { GetNeonType(getLLVMContext(), 4, quad), Ty };
+ llvm::Type *Tys[2] = { GetNeonType(getLLVMContext(), 4, quad), Ty };
Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp : Intrinsic::arm_neon_vcvtfxs2fp;
- Function *F = CGM.getIntrinsic(Int, Tys, 2);
+ Function *F = CGM.getIntrinsic(Int, Tys);
return EmitNeonCall(F, Ops, "vcvt_n");
}
case ARM::BI__builtin_neon_vcvt_n_s32_v:
case ARM::BI__builtin_neon_vcvt_n_u32_v:
case ARM::BI__builtin_neon_vcvtq_n_s32_v:
case ARM::BI__builtin_neon_vcvtq_n_u32_v: {
- const llvm::Type *Tys[2] = { Ty, GetNeonType(getLLVMContext(), 4, quad) };
+ llvm::Type *Tys[2] = { Ty, GetNeonType(getLLVMContext(), 4, quad) };
Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu : Intrinsic::arm_neon_vcvtfp2fxs;
- Function *F = CGM.getIntrinsic(Int, Tys, 2);
+ Function *F = CGM.getIntrinsic(Int, Tys);
return EmitNeonCall(F, Ops, "vcvt_n");
}
case ARM::BI__builtin_neon_vext_v:
@@ -1381,15 +1397,15 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vhadd_v:
case ARM::BI__builtin_neon_vhaddq_v:
Int = usgn ? Intrinsic::arm_neon_vhaddu : Intrinsic::arm_neon_vhadds;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vhadd");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhadd");
case ARM::BI__builtin_neon_vhsub_v:
case ARM::BI__builtin_neon_vhsubq_v:
Int = usgn ? Intrinsic::arm_neon_vhsubu : Intrinsic::arm_neon_vhsubs;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vhsub");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhsub");
case ARM::BI__builtin_neon_vld1_v:
case ARM::BI__builtin_neon_vld1q_v:
Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty),
Ops, "vld1");
case ARM::BI__builtin_neon_vld1_lane_v:
case ARM::BI__builtin_neon_vld1q_lane_v:
@@ -1410,7 +1426,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
case ARM::BI__builtin_neon_vld2_v:
case ARM::BI__builtin_neon_vld2q_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, Ty);
Value *Align = GetPointeeAlignment(*this, E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
@@ -1419,7 +1435,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
case ARM::BI__builtin_neon_vld3_v:
case ARM::BI__builtin_neon_vld3q_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, Ty);
Value *Align = GetPointeeAlignment(*this, E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
@@ -1428,7 +1444,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
case ARM::BI__builtin_neon_vld4_v:
case ARM::BI__builtin_neon_vld4q_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, Ty);
Value *Align = GetPointeeAlignment(*this, E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
@@ -1437,36 +1453,42 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
case ARM::BI__builtin_neon_vld2_lane_v:
case ARM::BI__builtin_neon_vld2q_lane_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
- Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld2_lane");
+ Ops[1] = Builder.CreateCall(F,
+ ArrayRef<Value *>(Ops.begin() + 1, Ops.end()),
+ "vld2_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
}
case ARM::BI__builtin_neon_vld3_lane_v:
case ARM::BI__builtin_neon_vld3q_lane_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3lane, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3lane, Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
- Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld3_lane");
+ Ops[1] = Builder.CreateCall(F,
+ ArrayRef<Value *>(Ops.begin() + 1, Ops.end()),
+ "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
}
case ARM::BI__builtin_neon_vld4_lane_v:
case ARM::BI__builtin_neon_vld4q_lane_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4lane, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4lane, Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
- Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld3_lane");
+ Ops[1] = Builder.CreateCall(F,
+ ArrayRef<Value *>(Ops.begin() + 1, Ops.end()),
+ "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
@@ -1488,7 +1510,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
break;
default: assert(0 && "unknown vld_dup intrinsic?");
}
- Function *F = CGM.getIntrinsic(Int, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Int, Ty);
Value *Align = GetPointeeAlignment(*this, E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
@@ -1507,7 +1529,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
break;
default: assert(0 && "unknown vld_dup intrinsic?");
}
- Function *F = CGM.getIntrinsic(Int, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Int, Ty);
const llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
SmallVector<Value*, 6> Args;
@@ -1518,7 +1540,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Args.push_back(CI);
Args.push_back(GetPointeeAlignment(*this, E->getArg(1)));
- Ops[1] = Builder.CreateCall(F, Args.begin(), Args.end(), "vld_dup");
+ Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
// splat lane 0 to all elts in each vector of the result.
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
Value *Val = Builder.CreateExtractValue(Ops[1], i);
@@ -1534,11 +1556,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vmax_v:
case ARM::BI__builtin_neon_vmaxq_v:
Int = usgn ? Intrinsic::arm_neon_vmaxu : Intrinsic::arm_neon_vmaxs;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmax");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
case ARM::BI__builtin_neon_vmin_v:
case ARM::BI__builtin_neon_vminq_v:
Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmin");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
case ARM::BI__builtin_neon_vmovl_v: {
const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
@@ -1554,12 +1576,12 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vmul_v:
case ARM::BI__builtin_neon_vmulq_v:
assert(poly && "vmul builtin only supported for polynomial types");
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, Ty),
Ops, "vmul");
case ARM::BI__builtin_neon_vmull_v:
Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
Int = poly ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmull");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
case ARM::BI__builtin_neon_vpadal_v:
case ARM::BI__builtin_neon_vpadalq_v: {
Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
@@ -1567,13 +1589,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
const llvm::Type *EltTy =
llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
- const llvm::Type *NarrowTy =
+ llvm::Type *NarrowTy =
llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
- const llvm::Type *Tys[2] = { Ty, NarrowTy };
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys, 2), Ops, "vpadal");
+ llvm::Type *Tys[2] = { Ty, NarrowTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpadal");
}
case ARM::BI__builtin_neon_vpadd_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, Ty),
Ops, "vpadd");
case ARM::BI__builtin_neon_vpaddl_v:
case ARM::BI__builtin_neon_vpaddlq_v: {
@@ -1581,120 +1603,120 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// The source operand type has twice as many elements of half the size.
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
const llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
- const llvm::Type *NarrowTy =
+ llvm::Type *NarrowTy =
llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
- const llvm::Type *Tys[2] = { Ty, NarrowTy };
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys, 2), Ops, "vpaddl");
+ llvm::Type *Tys[2] = { Ty, NarrowTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
}
case ARM::BI__builtin_neon_vpmax_v:
Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpmax");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
case ARM::BI__builtin_neon_vpmin_v:
Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpmin");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
case ARM::BI__builtin_neon_vqabs_v:
case ARM::BI__builtin_neon_vqabsq_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqabs, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqabs, Ty),
Ops, "vqabs");
case ARM::BI__builtin_neon_vqadd_v:
case ARM::BI__builtin_neon_vqaddq_v:
Int = usgn ? Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqadd");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqadd");
case ARM::BI__builtin_neon_vqdmlal_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlal, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlal, Ty),
Ops, "vqdmlal");
case ARM::BI__builtin_neon_vqdmlsl_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlsl, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlsl, Ty),
Ops, "vqdmlsl");
case ARM::BI__builtin_neon_vqdmulh_v:
case ARM::BI__builtin_neon_vqdmulhq_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, Ty),
Ops, "vqdmulh");
case ARM::BI__builtin_neon_vqdmull_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
Ops, "vqdmull");
case ARM::BI__builtin_neon_vqmovn_v:
Int = usgn ? Intrinsic::arm_neon_vqmovnu : Intrinsic::arm_neon_vqmovns;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqmovn");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqmovn");
case ARM::BI__builtin_neon_vqmovun_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, Ty),
Ops, "vqdmull");
case ARM::BI__builtin_neon_vqneg_v:
case ARM::BI__builtin_neon_vqnegq_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, Ty),
Ops, "vqneg");
case ARM::BI__builtin_neon_vqrdmulh_v:
case ARM::BI__builtin_neon_vqrdmulhq_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, Ty),
Ops, "vqrdmulh");
case ARM::BI__builtin_neon_vqrshl_v:
case ARM::BI__builtin_neon_vqrshlq_v:
Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshl");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshl");
case ARM::BI__builtin_neon_vqrshrn_n_v:
Int = usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshrn_n",
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
1, true);
case ARM::BI__builtin_neon_vqrshrun_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
Ops, "vqrshrun_n", 1, true);
case ARM::BI__builtin_neon_vqshl_v:
case ARM::BI__builtin_neon_vqshlq_v:
Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl");
case ARM::BI__builtin_neon_vqshl_n_v:
case ARM::BI__builtin_neon_vqshlq_n_v:
Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl_n",
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
1, false);
case ARM::BI__builtin_neon_vqshlu_n_v:
case ARM::BI__builtin_neon_vqshluq_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, Ty),
Ops, "vqshlu", 1, false);
case ARM::BI__builtin_neon_vqshrn_n_v:
Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshrn_n",
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
1, true);
case ARM::BI__builtin_neon_vqshrun_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
Ops, "vqshrun_n", 1, true);
case ARM::BI__builtin_neon_vqsub_v:
case ARM::BI__builtin_neon_vqsubq_v:
Int = usgn ? Intrinsic::arm_neon_vqsubu : Intrinsic::arm_neon_vqsubs;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqsub");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqsub");
case ARM::BI__builtin_neon_vraddhn_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vraddhn, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vraddhn, Ty),
Ops, "vraddhn");
case ARM::BI__builtin_neon_vrecpe_v:
case ARM::BI__builtin_neon_vrecpeq_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
Ops, "vrecpe");
case ARM::BI__builtin_neon_vrecps_v:
case ARM::BI__builtin_neon_vrecpsq_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecps, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecps, Ty),
Ops, "vrecps");
case ARM::BI__builtin_neon_vrhadd_v:
case ARM::BI__builtin_neon_vrhaddq_v:
Int = usgn ? Intrinsic::arm_neon_vrhaddu : Intrinsic::arm_neon_vrhadds;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrhadd");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrhadd");
case ARM::BI__builtin_neon_vrshl_v:
case ARM::BI__builtin_neon_vrshlq_v:
Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshl");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshl");
case ARM::BI__builtin_neon_vrshrn_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
Ops, "vrshrn_n", 1, true);
case ARM::BI__builtin_neon_vrshr_n_v:
case ARM::BI__builtin_neon_vrshrq_n_v:
Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshr_n", 1, true);
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
case ARM::BI__builtin_neon_vrsqrte_v:
case ARM::BI__builtin_neon_vrsqrteq_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, Ty),
Ops, "vrsqrte");
case ARM::BI__builtin_neon_vrsqrts_v:
case ARM::BI__builtin_neon_vrsqrtsq_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrts, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrts, Ty),
Ops, "vrsqrts");
case ARM::BI__builtin_neon_vrsra_n_v:
case ARM::BI__builtin_neon_vrsraq_n_v:
@@ -1702,10 +1724,10 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
- Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, &Ty, 1), Ops[1], Ops[2]);
+ Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
case ARM::BI__builtin_neon_vrsubhn_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, Ty),
Ops, "vrsubhn");
case ARM::BI__builtin_neon_vset_lane_i8:
case ARM::BI__builtin_neon_vset_lane_i16:
@@ -1722,16 +1744,16 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vshl_v:
case ARM::BI__builtin_neon_vshlq_v:
Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshl");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshl");
case ARM::BI__builtin_neon_vshll_n_v:
Int = usgn ? Intrinsic::arm_neon_vshiftlu : Intrinsic::arm_neon_vshiftls;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshll", 1);
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshll", 1);
case ARM::BI__builtin_neon_vshl_n_v:
case ARM::BI__builtin_neon_vshlq_n_v:
Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1], "vshl_n");
case ARM::BI__builtin_neon_vshrn_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, Ty),
Ops, "vshrn_n", 1, true);
case ARM::BI__builtin_neon_vshr_n_v:
case ARM::BI__builtin_neon_vshrq_n_v:
@@ -1747,7 +1769,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vsli_n_v:
case ARM::BI__builtin_neon_vsliq_n_v:
Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
Ops, "vsli_n");
case ARM::BI__builtin_neon_vsra_n_v:
case ARM::BI__builtin_neon_vsraq_n_v:
@@ -1762,7 +1784,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vst1_v:
case ARM::BI__builtin_neon_vst1q_v:
Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst1_lane_v:
case ARM::BI__builtin_neon_vst1q_lane_v:
@@ -1773,35 +1795,35 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vst2_v:
case ARM::BI__builtin_neon_vst2q_v:
Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst2_lane_v:
case ARM::BI__builtin_neon_vst2q_lane_v:
Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst3_v:
case ARM::BI__builtin_neon_vst3q_v:
Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst3_lane_v:
case ARM::BI__builtin_neon_vst3q_lane_v:
Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst4_v:
case ARM::BI__builtin_neon_vst4q_v:
Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst4_lane_v:
case ARM::BI__builtin_neon_vst4q_lane_v:
Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, Ty),
Ops, "");
case ARM::BI__builtin_neon_vsubhn_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vsubhn, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vsubhn, Ty),
Ops, "vsubhn");
case ARM::BI__builtin_neon_vtbl1_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
@@ -2005,7 +2027,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ return Builder.CreateCall(F, Ops, name);
}
case X86::BI__builtin_ia32_vec_init_v8qi:
case X86::BI__builtin_ia32_vec_init_v4hi:
@@ -2065,15 +2087,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ return Builder.CreateCall(F, Ops, name);
}
case X86::BI__builtin_ia32_cmpps: {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpps");
+ return Builder.CreateCall(F, Ops, "cmpps");
}
case X86::BI__builtin_ia32_cmpss: {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ss);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss");
+ return Builder.CreateCall(F, Ops, "cmpss");
}
case X86::BI__builtin_ia32_ldmxcsr: {
const llvm::Type *PtrTy = Int8PtrTy;
@@ -2093,11 +2115,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
case X86::BI__builtin_ia32_cmppd: {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_pd);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmppd");
+ return Builder.CreateCall(F, Ops, "cmppd");
}
case X86::BI__builtin_ia32_cmpsd: {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_sd);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpsd");
+ return Builder.CreateCall(F, Ops, "cmpsd");
}
case X86::BI__builtin_ia32_storehps:
case X86::BI__builtin_ia32_storelps: {
@@ -2141,7 +2163,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// create i32 constant
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
+ return Builder.CreateCall(F, ArrayRef<Value *>(&Ops[0], 2), "palignr");
}
// If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
@@ -2171,7 +2193,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// create i32 constant
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
+ return Builder.CreateCall(F, ArrayRef<Value *>(&Ops[0], 2), "palignr");
}
// If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
@@ -2323,7 +2345,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ return Builder.CreateCall(F, Ops, name);
}
}
}
@@ -2379,7 +2401,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "");
+ return Builder.CreateCall(F, Ops, "");
}
// vec_st
@@ -2412,7 +2434,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "");
+ return Builder.CreateCall(F, Ops, "");
}
}
return 0;
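
Note: the CGBuiltin.cpp hunks above are one mechanical migration applied repeatedly: CodeGenModule::getIntrinsic now receives its overload type(s) directly (a single llvm::Type* or a Type* array, both of which convert implicitly to an ArrayRef) instead of a pointer-plus-count pair, and IRBuilder::CreateCall takes the whole operand container as an ArrayRef<Value*> rather than begin/end iterators. A minimal sketch of the resulting call-site shape, assuming the in-tree CodeGen headers; the wrapper name and its parameter list are illustrative and not part of the patch:

    // Sketch only: emit a type-overloaded intrinsic call the way the hunks
    // above now do. The old forms are shown in comments for contrast.
    static llvm::Value *emitTypedIntrinsicCall(clang::CodeGen::CodeGenModule &CGM,
                                               llvm::IRBuilder<> &Builder,
                                               unsigned IntrinsicID,
                                               llvm::Type *OverloadTy,
                                               llvm::SmallVectorImpl<llvm::Value*> &Ops,
                                               const llvm::Twine &Name) {
      // Was: CGM.getIntrinsic(IntrinsicID, &OverloadTy, 1)
      llvm::Function *F = CGM.getIntrinsic(IntrinsicID, OverloadTy);
      // Was: Builder.CreateCall(F, Ops.begin(), Ops.end(), Name)
      return Builder.CreateCall(F, Ops, Name); // Ops converts to ArrayRef<Value*>
    }

Because an ArrayRef carries both the pointer and the length, the error-prone explicit count argument disappears at every one of these call sites.
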
diff --git a/lib/CodeGen/CGCXXABI.cpp b/lib/CodeGen/CGCXXABI.cpp
index 92f1c63..dcc28b4 100644
--- a/lib/CodeGen/CGCXXABI.cpp
+++ b/lib/CodeGen/CGCXXABI.cpp
@@ -34,7 +34,7 @@ static llvm::Constant *GetBogusMemberPointer(CodeGenModule &CGM,
return llvm::Constant::getNullValue(CGM.getTypes().ConvertType(T));
}
-const llvm::Type *
+llvm::Type *
CGCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
return CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
}
diff --git a/lib/CodeGen/CGCXXABI.h b/lib/CodeGen/CGCXXABI.h
index de4df3d..29f299a 100644
--- a/lib/CodeGen/CGCXXABI.h
+++ b/lib/CodeGen/CGCXXABI.h
@@ -82,7 +82,7 @@ public:
/// Find the LLVM type used to represent the given member pointer
/// type.
- virtual const llvm::Type *
+ virtual llvm::Type *
ConvertMemberPointerType(const MemberPointerType *MPT);
/// Load a member function from an object and a member function
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 712ae89..f8783ad 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -1,4 +1,4 @@
-//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===//
+//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -25,6 +25,8 @@
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/InlineAsm.h"
+#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;
@@ -65,31 +67,28 @@ static CanQualType GetReturnType(QualType RetTy) {
}
const CGFunctionInfo &
-CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP,
- bool IsRecursive) {
+CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) {
return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
llvm::SmallVector<CanQualType, 16>(),
- FTNP->getExtInfo(), IsRecursive);
+ FTNP->getExtInfo());
}
/// \param Args - contains any initial parameters besides those
/// in the formal type
static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
llvm::SmallVectorImpl<CanQualType> &ArgTys,
- CanQual<FunctionProtoType> FTP,
- bool IsRecursive = false) {
+ CanQual<FunctionProtoType> FTP) {
// FIXME: Kill copy.
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
ArgTys.push_back(FTP->getArgType(i));
CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
- return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo(), IsRecursive);
+ return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
}
const CGFunctionInfo &
-CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP,
- bool IsRecursive) {
+CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) {
llvm::SmallVector<CanQualType, 16> ArgTys;
- return ::getFunctionInfo(*this, ArgTys, FTP, IsRecursive);
+ return ::getFunctionInfo(*this, ArgTys, FTP);
}
static CallingConv getCallingConventionForDecl(const Decl *D) {
@@ -189,13 +188,15 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
e = MD->param_end(); i != e; ++i) {
ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
}
- return getFunctionInfo(GetReturnType(MD->getResultType()),
- ArgTys,
- FunctionType::ExtInfo(
- /*NoReturn*/ false,
- /*HasRegParm*/ false,
- /*RegParm*/ 0,
- getCallingConventionForDecl(MD)));
+
+ FunctionType::ExtInfo einfo;
+ einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));
+
+ if (getContext().getLangOptions().ObjCAutoRefCount &&
+ MD->hasAttr<NSReturnsRetainedAttr>())
+ einfo = einfo.withProducesResult(true);
+
+ return getFunctionInfo(GetReturnType(MD->getResultType()), ArgTys, einfo);
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
@@ -240,8 +241,7 @@ const CGFunctionInfo &CodeGenTypes::getNullaryFunctionInfo() {
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
const llvm::SmallVectorImpl<CanQualType> &ArgTys,
- const FunctionType::ExtInfo &Info,
- bool IsRecursive) {
+ const FunctionType::ExtInfo &Info) {
#ifndef NDEBUG
for (llvm::SmallVectorImpl<CanQualType>::const_iterator
I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
@@ -252,8 +252,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
// Lookup or create unique function info.
llvm::FoldingSetNodeID ID;
- CGFunctionInfo::Profile(ID, Info, ResTy,
- ArgTys.begin(), ArgTys.end());
+ CGFunctionInfo::Profile(ID, Info, ResTy, ArgTys.begin(), ArgTys.end());
void *InsertPos = 0;
CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
@@ -261,10 +260,14 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
return *FI;
// Construct the function info.
- FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getHasRegParm(), Info.getRegParm(), ResTy,
+ FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getProducesResult(),
+ Info.getHasRegParm(), Info.getRegParm(), ResTy,
ArgTys.data(), ArgTys.size());
FunctionInfos.InsertNode(FI, InsertPos);
+ bool Inserted = FunctionsBeingProcessed.insert(FI); (void)Inserted;
+ assert(Inserted && "Recursively being processed?");
+
// Compute ABI information.
getABIInfo().computeInfo(*FI);
@@ -273,30 +276,29 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
// default now.
ABIArgInfo &RetInfo = FI->getReturnInfo();
if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
- RetInfo.setCoerceToType(ConvertTypeRecursive(FI->getReturnType()));
+ RetInfo.setCoerceToType(ConvertType(FI->getReturnType()));
for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
I != E; ++I)
if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
- I->info.setCoerceToType(ConvertTypeRecursive(I->type));
-
- // If this is a top-level call and ConvertTypeRecursive hit unresolved pointer
- // types, resolve them now. These pointers may point to this function, which
- // we *just* filled in the FunctionInfo for.
- if (!IsRecursive && !PointersToResolve.empty())
- HandleLateResolvedPointers();
+ I->info.setCoerceToType(ConvertType(I->type));
+ bool Erased = FunctionsBeingProcessed.erase(FI); (void)Erased;
+ assert(Erased && "Not in set?");
+
return *FI;
}
CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
- bool _NoReturn, bool _HasRegParm, unsigned _RegParm,
+ bool _NoReturn, bool returnsRetained,
+ bool _HasRegParm, unsigned _RegParm,
CanQualType ResTy,
const CanQualType *ArgTys,
unsigned NumArgTys)
: CallingConvention(_CallingConvention),
EffectiveCallingConvention(_CallingConvention),
- NoReturn(_NoReturn), HasRegParm(_HasRegParm), RegParm(_RegParm)
+ NoReturn(_NoReturn), ReturnsRetained(returnsRetained),
+ HasRegParm(_HasRegParm), RegParm(_RegParm)
{
NumArgs = NumArgTys;
@@ -310,8 +312,7 @@ CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
/***/
void CodeGenTypes::GetExpandedTypes(QualType type,
- llvm::SmallVectorImpl<const llvm::Type*> &expandedTypes,
- bool isRecursive) {
+ llvm::SmallVectorImpl<llvm::Type*> &expandedTypes) {
const RecordType *RT = type->getAsStructureType();
assert(RT && "Can only expand structure types.");
const RecordDecl *RD = RT->getDecl();
@@ -326,9 +327,9 @@ void CodeGenTypes::GetExpandedTypes(QualType type,
QualType fieldType = FD->getType();
if (fieldType->isRecordType())
- GetExpandedTypes(fieldType, expandedTypes, isRecursive);
+ GetExpandedTypes(fieldType, expandedTypes);
else
- expandedTypes.push_back(ConvertType(fieldType, isRecursive));
+ expandedTypes.push_back(ConvertType(fieldType));
}
}
@@ -352,7 +353,7 @@ CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
if (CodeGenFunction::hasAggregateLLVMType(FT)) {
AI = ExpandTypeFromArgs(FT, LV, AI);
} else {
- EmitStoreThroughLValue(RValue::get(AI), LV, FT);
+ EmitStoreThroughLValue(RValue::get(AI), LV);
++AI;
}
}
@@ -360,33 +361,6 @@ CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
return AI;
}
-void
-CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
- llvm::SmallVector<llvm::Value*, 16> &Args) {
- const RecordType *RT = Ty->getAsStructureType();
- assert(RT && "Can only expand structure types.");
-
- RecordDecl *RD = RT->getDecl();
- assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
- llvm::Value *Addr = RV.getAggregateAddr();
- for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
- i != e; ++i) {
- FieldDecl *FD = *i;
- QualType FT = FD->getType();
-
- // FIXME: What are the right qualifiers here?
- LValue LV = EmitLValueForField(Addr, FD, 0);
- if (CodeGenFunction::hasAggregateLLVMType(FT)) {
- ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
- } else {
- RValue RV = EmitLoadOfLValue(LV, FT);
- assert(RV.isScalar() &&
- "Unexpected non-scalar rvalue during struct expansion.");
- Args.push_back(RV.getScalarVal());
- }
- }
-}
-
/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
@@ -622,7 +596,7 @@ bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
return false;
}
-const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
+llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
const CGFunctionInfo &FI = getFunctionInfo(GD);
// For definition purposes, don't consider a K&R function variadic.
@@ -631,13 +605,16 @@ const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
Variadic = FPT->isVariadic();
- return GetFunctionType(FI, Variadic, false);
+ return GetFunctionType(FI, Variadic);
}
-const llvm::FunctionType *
-CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic,
- bool isRecursive) {
- llvm::SmallVector<const llvm::Type*, 8> argTypes;
+llvm::FunctionType *
+CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
+
+ bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
+ assert(Inserted && "Recursively being processed?");
+
+ llvm::SmallVector<llvm::Type*, 8> argTypes;
const llvm::Type *resultType = 0;
const ABIArgInfo &retAI = FI.getReturnInfo();
@@ -655,7 +632,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic,
resultType = llvm::Type::getVoidTy(getLLVMContext());
QualType ret = FI.getReturnType();
- const llvm::Type *ty = ConvertType(ret, isRecursive);
+ const llvm::Type *ty = ConvertType(ret);
unsigned addressSpace = Context.getTargetAddressSpace(ret);
argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
break;
@@ -676,7 +653,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic,
case ABIArgInfo::Indirect: {
// indirect arguments are always on the stack, which is addr space #0.
- const llvm::Type *LTy = ConvertTypeForMem(it->type, isRecursive);
+ const llvm::Type *LTy = ConvertTypeForMem(it->type);
argTypes.push_back(LTy->getPointerTo());
break;
}
@@ -686,7 +663,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic,
// If the coerce-to type is a first class aggregate, flatten it. Either
// way is semantically identical, but fast-isel and the optimizer
// generally likes scalar values better than FCAs.
- const llvm::Type *argType = argAI.getCoerceToType();
+ llvm::Type *argType = argAI.getCoerceToType();
if (const llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
argTypes.push_back(st->getElementType(i));
@@ -697,11 +674,14 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic,
}
case ABIArgInfo::Expand:
- GetExpandedTypes(it->type, argTypes, isRecursive);
+ GetExpandedTypes(it->type, argTypes);
break;
}
}
+ bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
+ assert(Erased && "Not in set?");
+
return llvm::FunctionType::get(resultType, argTypes, isVariadic);
}
@@ -709,16 +689,15 @@ const llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- if (!VerifyFuncTypeComplete(FPT)) {
- const CGFunctionInfo *Info;
- if (isa<CXXDestructorDecl>(MD))
- Info = &getFunctionInfo(cast<CXXDestructorDecl>(MD), GD.getDtorType());
- else
- Info = &getFunctionInfo(MD);
- return GetFunctionType(*Info, FPT->isVariadic(), false);
- }
-
- return llvm::OpaqueType::get(getLLVMContext());
+ if (!isFuncTypeConvertible(FPT))
+ return llvm::StructType::get(getLLVMContext());
+
+ const CGFunctionInfo *Info;
+ if (isa<CXXDestructorDecl>(MD))
+ Info = &getFunctionInfo(cast<CXXDestructorDecl>(MD), GD.getDtorType());
+ else
+ Info = &getFunctionInfo(MD);
+ return GetFunctionType(*Info, FPT->isVariadic());
}
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
@@ -845,11 +824,11 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
continue;
case ABIArgInfo::Expand: {
- llvm::SmallVector<const llvm::Type*, 8> types;
+ llvm::SmallVector<llvm::Type*, 8> types;
// FIXME: This is rather inefficient. Do we ever actually need to do
// anything here? The result should be just reconstructed on the other
// side, so extension should be a non-issue.
- getTypes().GetExpandedTypes(ParamType, types, false);
+ getTypes().GetExpandedTypes(ParamType, types);
Index += types.size();
continue;
}
@@ -1067,6 +1046,95 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
assert(AI == Fn->arg_end() && "Argument mismatch!");
}
+/// Try to emit a fused autorelease of a return result.
+static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
+ llvm::Value *result) {
+ // We must be immediately followed the cast.
+  // We must be immediately followed by the cast.
+ if (BB->empty()) return 0;
+ if (&BB->back() != result) return 0;
+
+ const llvm::Type *resultType = result->getType();
+
+ // result is in a BasicBlock and is therefore an Instruction.
+ llvm::Instruction *generator = cast<llvm::Instruction>(result);
+
+ llvm::SmallVector<llvm::Instruction*,4> insnsToKill;
+
+ // Look for:
+ // %generator = bitcast %type1* %generator2 to %type2*
+ while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
+ // We would have emitted this as a constant if the operand weren't
+ // an Instruction.
+ generator = cast<llvm::Instruction>(bitcast->getOperand(0));
+
+ // Require the generator to be immediately followed by the cast.
+ if (generator->getNextNode() != bitcast)
+ return 0;
+
+ insnsToKill.push_back(bitcast);
+ }
+
+ // Look for:
+ // %generator = call i8* @objc_retain(i8* %originalResult)
+ // or
+ // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
+ llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
+ if (!call) return 0;
+
+ bool doRetainAutorelease;
+
+ if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
+ doRetainAutorelease = true;
+ } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
+ .objc_retainAutoreleasedReturnValue) {
+ doRetainAutorelease = false;
+
+ // Look for an inline asm immediately preceding the call and kill it, too.
+ llvm::Instruction *prev = call->getPrevNode();
+ if (llvm::CallInst *asmCall = dyn_cast_or_null<llvm::CallInst>(prev))
+ if (asmCall->getCalledValue()
+ == CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker)
+ insnsToKill.push_back(prev);
+ } else {
+ return 0;
+ }
+
+ result = call->getArgOperand(0);
+ insnsToKill.push_back(call);
+
+ // Keep killing bitcasts, for sanity. Note that we no longer care
+ // about precise ordering as long as there's exactly one use.
+ while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
+ if (!bitcast->hasOneUse()) break;
+ insnsToKill.push_back(bitcast);
+ result = bitcast->getOperand(0);
+ }
+
+ // Delete all the unnecessary instructions, from latest to earliest.
+ for (llvm::SmallVectorImpl<llvm::Instruction*>::iterator
+ i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
+ (*i)->eraseFromParent();
+
+ // Do the fused retain/autorelease if we were asked to.
+ if (doRetainAutorelease)
+ result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
+
+ // Cast back to the result type.
+ return CGF.Builder.CreateBitCast(result, resultType);
+}
+
+/// Emit an ARC autorelease of the result of a function.
+static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
+ llvm::Value *result) {
+ // At -O0, try to emit a fused retain/autorelease.
+ if (CGF.shouldUseFusedARCCalls())
+ if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
+ return fused;
+
+ return CGF.EmitARCAutoreleaseReturnValue(result);
+}
+
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
// Functions with no result always return void.
if (ReturnValue == 0) {
@@ -1134,6 +1202,16 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
}
+
+ // In ARC, end functions that return a retainable type with a call
+ // to objc_autoreleaseReturnValue.
+ if (AutoreleaseResult) {
+ assert(getLangOptions().ObjCAutoRefCount &&
+ !FI.isReturnsRetained() &&
+ RetTy->isObjCRetainableType());
+ RV = emitAutoreleaseOfResult(*this, RV);
+ }
+
break;
case ABIArgInfo::Ignore:
@@ -1183,13 +1261,157 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
return args.add(RValue::get(value), type);
}
+static bool isProvablyNull(llvm::Value *addr) {
+ return isa<llvm::ConstantPointerNull>(addr);
+}
+
+static bool isProvablyNonNull(llvm::Value *addr) {
+ return isa<llvm::AllocaInst>(addr);
+}
+
+/// Emit the actual writing-back of a writeback.
+static void emitWriteback(CodeGenFunction &CGF,
+ const CallArgList::Writeback &writeback) {
+ llvm::Value *srcAddr = writeback.Address;
+ assert(!isProvablyNull(srcAddr) &&
+ "shouldn't have writeback for provably null argument");
+
+ llvm::BasicBlock *contBB = 0;
+
+ // If the argument wasn't provably non-null, we need to null check
+ // before doing the store.
+ bool provablyNonNull = isProvablyNonNull(srcAddr);
+ if (!provablyNonNull) {
+ llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
+ contBB = CGF.createBasicBlock("icr.done");
+
+ llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
+ CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
+ CGF.EmitBlock(writebackBB);
+ }
+
+ // Load the value to writeback.
+ llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
+
+ // Cast it back, in case we're writing an id to a Foo* or something.
+ value = CGF.Builder.CreateBitCast(value,
+ cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
+ "icr.writeback-cast");
+
+ // Perform the writeback.
+ QualType srcAddrType = writeback.AddressType;
+ CGF.EmitStoreThroughLValue(RValue::get(value),
+ CGF.MakeAddrLValue(srcAddr, srcAddrType));
+
+ // Jump to the continuation block.
+ if (!provablyNonNull)
+ CGF.EmitBlock(contBB);
+}
+
+static void emitWritebacks(CodeGenFunction &CGF,
+ const CallArgList &args) {
+ for (CallArgList::writeback_iterator
+ i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
+ emitWriteback(CGF, *i);
+}
+
+/// Emit an argument that's being passed call-by-writeback. That is,
+/// we are passing the address of
+static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
+ const ObjCIndirectCopyRestoreExpr *CRE) {
+ llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
+
+ // The dest and src types don't necessarily match in LLVM terms
+ // because of the crazy ObjC compatibility rules.
+
+ const llvm::PointerType *destType =
+ cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
+
+ // If the address is a constant null, just pass the appropriate null.
+ if (isProvablyNull(srcAddr)) {
+ args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
+ CRE->getType());
+ return;
+ }
+
+ QualType srcAddrType =
+ CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
+
+ // Create the temporary.
+ llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
+ "icr.temp");
+
+ // Zero-initialize it if we're not doing a copy-initialization.
+ bool shouldCopy = CRE->shouldCopy();
+ if (!shouldCopy) {
+ llvm::Value *null =
+ llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(destType->getElementType()));
+ CGF.Builder.CreateStore(null, temp);
+ }
+
+ llvm::BasicBlock *contBB = 0;
+
+ // If the address is *not* known to be non-null, we need to switch.
+ llvm::Value *finalArgument;
+
+ bool provablyNonNull = isProvablyNonNull(srcAddr);
+ if (provablyNonNull) {
+ finalArgument = temp;
+ } else {
+ llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
+
+ finalArgument = CGF.Builder.CreateSelect(isNull,
+ llvm::ConstantPointerNull::get(destType),
+ temp, "icr.argument");
+
+ // If we need to copy, then the load has to be conditional, which
+ // means we need control flow.
+ if (shouldCopy) {
+ contBB = CGF.createBasicBlock("icr.cont");
+ llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
+ CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
+ CGF.EmitBlock(copyBB);
+ }
+ }
+
+ // Perform a copy if necessary.
+ if (shouldCopy) {
+ LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
+ RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
+ assert(srcRV.isScalar());
+
+ llvm::Value *src = srcRV.getScalarVal();
+ src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
+ "icr.cast");
+
+ // Use an ordinary store, not a store-to-lvalue.
+ CGF.Builder.CreateStore(src, temp);
+ }
+
+ // Finish the control flow if we needed it.
+ if (shouldCopy && !provablyNonNull)
+ CGF.EmitBlock(contBB);
+
+ args.addWriteback(srcAddr, srcAddrType, temp);
+ args.add(RValue::get(finalArgument), CRE->getType());
+}
+
void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
QualType type) {
+ if (const ObjCIndirectCopyRestoreExpr *CRE
+ = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
+ assert(getContext().getLangOptions().ObjCAutoRefCount);
+ assert(getContext().hasSameType(E->getType(), type));
+ return emitWritebackArg(*this, args, CRE);
+ }
+
if (type->isReferenceType())
return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
type);
- if (hasAggregateLLVMType(type) && isa<ImplicitCastExpr>(E) &&
+ if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
+ isa<ImplicitCastExpr>(E) &&
cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
assert(L.isSimple());
@@ -1205,20 +1427,71 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
- llvm::Value * const *ArgBegin,
- llvm::Value * const *ArgEnd,
+ llvm::ArrayRef<llvm::Value *> Args,
const llvm::Twine &Name) {
llvm::BasicBlock *InvokeDest = getInvokeDest();
if (!InvokeDest)
- return Builder.CreateCall(Callee, ArgBegin, ArgEnd, Name);
+ return Builder.CreateCall(Callee, Args, Name);
llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
llvm::InvokeInst *Invoke = Builder.CreateInvoke(Callee, ContBB, InvokeDest,
- ArgBegin, ArgEnd, Name);
+ Args, Name);
EmitBlock(ContBB);
return Invoke;
}
+llvm::CallSite
+CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
+ const llvm::Twine &Name) {
+ return EmitCallOrInvoke(Callee, llvm::ArrayRef<llvm::Value *>(), Name);
+}
+
+static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
+ llvm::FunctionType *FTy) {
+ if (ArgNo < FTy->getNumParams())
+ assert(Elt->getType() == FTy->getParamType(ArgNo));
+ else
+ assert(FTy->isVarArg());
+ ++ArgNo;
+}
+
+void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
+ llvm::SmallVector<llvm::Value*,16> &Args,
+ llvm::FunctionType *IRFuncTy) {
+ const RecordType *RT = Ty->getAsStructureType();
+ assert(RT && "Can only expand structure types.");
+
+ RecordDecl *RD = RT->getDecl();
+ assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
+ llvm::Value *Addr = RV.getAggregateAddr();
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ FieldDecl *FD = *i;
+ QualType FT = FD->getType();
+
+ // FIXME: What are the right qualifiers here?
+ LValue LV = EmitLValueForField(Addr, FD, 0);
+ if (CodeGenFunction::hasAggregateLLVMType(FT)) {
+ ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()),
+ Args, IRFuncTy);
+ continue;
+ }
+
+ RValue RV = EmitLoadOfLValue(LV);
+ assert(RV.isScalar() &&
+ "Unexpected non-scalar rvalue during struct expansion.");
+
+ // Insert a bitcast as needed.
+ llvm::Value *V = RV.getScalarVal();
+ if (Args.size() < IRFuncTy->getNumParams() &&
+ V->getType() != IRFuncTy->getParamType(Args.size()))
+ V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));
+
+ Args.push_back(V);
+ }
+}
+
+
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::Value *Callee,
ReturnValueSlot ReturnValue,
@@ -1233,6 +1506,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
QualType RetTy = CallInfo.getReturnType();
const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
+ // IRArgNo - Keep track of the argument number in the callee we're looking at.
+ unsigned IRArgNo = 0;
+ llvm::FunctionType *IRFuncTy =
+ cast<llvm::FunctionType>(
+ cast<llvm::PointerType>(Callee->getType())->getElementType());
// If the call returns a temporary with struct return, create a temporary
// alloca to hold the result, unless one is given to us.
@@ -1241,6 +1519,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (!Value)
Value = CreateMemTemp(RetTy);
Args.push_back(Value);
+ checkArgMatches(Value, IRArgNo, IRFuncTy);
}
assert(CallInfo.arg_size() == CallArgs.size() &&
@@ -1251,24 +1530,54 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
const ABIArgInfo &ArgInfo = info_it->info;
RValue RV = I->RV;
- unsigned Alignment =
+ unsigned TypeAlign =
getContext().getTypeAlignInChars(I->Ty).getQuantity();
switch (ArgInfo.getKind()) {
case ABIArgInfo::Indirect: {
if (RV.isScalar() || RV.isComplex()) {
// Make a temporary alloca to pass the argument.
- Args.push_back(CreateMemTemp(I->Ty));
+ llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
+ if (ArgInfo.getIndirectAlign() > AI->getAlignment())
+ AI->setAlignment(ArgInfo.getIndirectAlign());
+ Args.push_back(AI);
+
if (RV.isScalar())
EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
- Alignment, I->Ty);
+ TypeAlign, I->Ty);
else
StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
- } else if (I->NeedsCopy && !ArgInfo.getIndirectByVal()) {
- Args.push_back(CreateMemTemp(I->Ty));
- EmitAggregateCopy(Args.back(), RV.getAggregateAddr(), I->Ty,
- RV.isVolatileQualified());
+
+ // Validate argument match.
+ checkArgMatches(AI, IRArgNo, IRFuncTy);
} else {
- Args.push_back(RV.getAggregateAddr());
+ // We want to avoid creating an unnecessary temporary+copy here;
+ // however, we need one in two cases:
+ // 1. If the argument is not byval, and we are required to copy the
+ // source. (This case doesn't occur on any common architecture.)
+ // 2. If the argument is byval, RV is not sufficiently aligned, and
+ // we cannot force it to be sufficiently aligned.
+ llvm::Value *Addr = RV.getAggregateAddr();
+ unsigned Align = ArgInfo.getIndirectAlign();
+ const llvm::TargetData *TD = &CGM.getTargetData();
+ if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
+ (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
+ llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
+ // Create an aligned temporary, and copy to it.
+ llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
+ if (Align > AI->getAlignment())
+ AI->setAlignment(Align);
+ Args.push_back(AI);
+ EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
+
+ // Validate argument match.
+ checkArgMatches(AI, IRArgNo, IRFuncTy);
+ } else {
+ // Skip the extra memcpy call.
+ Args.push_back(Addr);
+
+ // Validate argument match.
+ checkArgMatches(Addr, IRArgNo, IRFuncTy);
+ }
}
break;
}
@@ -1281,10 +1590,20 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
ArgInfo.getDirectOffset() == 0) {
+ llvm::Value *V;
if (RV.isScalar())
- Args.push_back(RV.getScalarVal());
+ V = RV.getScalarVal();
else
- Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
+ V = Builder.CreateLoad(RV.getAggregateAddr());
+
+ // If the argument doesn't match, perform a bitcast to coerce it. This
+ // can happen due to trivial type mismatches.
+ if (IRArgNo < IRFuncTy->getNumParams() &&
+ V->getType() != IRFuncTy->getParamType(IRArgNo))
+ V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
+ Args.push_back(V);
+
+ checkArgMatches(V, IRArgNo, IRFuncTy);
break;
}
@@ -1292,7 +1611,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::Value *SrcPtr;
if (RV.isScalar()) {
SrcPtr = CreateMemTemp(I->Ty, "coerce");
- EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, Alignment, I->Ty);
+ EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
} else if (RV.isComplex()) {
SrcPtr = CreateMemTemp(I->Ty, "coerce");
StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
@@ -1321,18 +1640,25 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// We don't know what we're loading from.
LI->setAlignment(1);
Args.push_back(LI);
+
+ // Validate argument match.
+ checkArgMatches(LI, IRArgNo, IRFuncTy);
}
} else {
// In the simple case, just pass the coerced loaded value.
Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
*this));
+
+ // Validate argument match.
+ checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
}
break;
}
case ABIArgInfo::Expand:
- ExpandTypeToArgs(I->Ty, RV, Args);
+ ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
+ IRArgNo = Args.size();
break;
}
}
@@ -1367,7 +1693,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
}
-
unsigned CallingConv;
CodeGen::AttributeListType AttributeList;
CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
@@ -1380,11 +1705,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::CallSite CS;
if (!InvokeDest) {
- CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
+ CS = Builder.CreateCall(Callee, Args);
} else {
llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
- Args.data(), Args.data()+Args.size());
+ CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
EmitBlock(Cont);
}
if (callOrInvoke)
@@ -1413,6 +1737,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
CI->setName("call");
+ // Emit any writebacks immediately. Arguably this should happen
+ // after any return-value munging.
+ if (CallArgs.hasWritebacks())
+ emitWritebacks(*this, CallArgs);
+
switch (RetAI.getKind()) {
case ABIArgInfo::Indirect: {
unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
@@ -1430,8 +1759,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
- if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
- RetAI.getDirectOffset() == 0) {
+ llvm::Type *RetIRTy = ConvertType(RetTy);
+ if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
if (RetTy->isAnyComplexType()) {
llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
@@ -1448,7 +1777,13 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
return RValue::getAggregate(DestPtr);
}
- return RValue::get(CI);
+
+ // If the argument doesn't match, perform a bitcast to coerce it. This
+ // can happen due to trivial type mismatches.
+ llvm::Value *V = CI;
+ if (V->getType() != RetIRTy)
+ V = Builder.CreateBitCast(V, RetIRTy);
+ return RValue::get(V);
}
llvm::Value *DestPtr = ReturnValue.getValue();
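
Note: the CGCall.cpp additions above give ARC a call-by-writeback path: emitWritebackArg passes the callee a temporary (or a select between null and the temporary when the source address may be null) and records the original address on the CallArgList, and emitWritebacks copies each temporary back to its source right after the call instruction. A condensed sketch of that lifecycle, assuming the in-tree CGCall.h/CodeGenFunction.h headers; the wrapper and the emitOneWriteback stand-in are illustrative, and only the CallArgList hooks (addWriteback, hasWritebacks, writeback_begin/end) come from this patch:

    // Stand-in declaration for the file-static emitWriteback() added above.
    void emitOneWriteback(clang::CodeGen::CodeGenFunction &CGF,
                          const clang::CodeGen::CallArgList::Writeback &wb);

    // Sketch only: the three phases of a call-by-writeback argument.
    void sketchCallByWriteback(clang::CodeGen::CodeGenFunction &CGF,
                               clang::CodeGen::CallArgList &args,
                               llvm::Value *srcAddr, clang::QualType srcType,
                               llvm::Value *temp, clang::QualType argType) {
      // 1. While building the argument list: pass the temporary to the callee
      //    and remember where its value must be written back afterwards.
      args.addWriteback(srcAddr, srcType, temp);
      args.add(clang::CodeGen::RValue::get(temp), argType);

      // 2. The call itself is emitted by CodeGenFunction::EmitCall as usual.

      // 3. Immediately after the call: flush every recorded writeback.
      if (args.hasWritebacks())
        for (clang::CodeGen::CallArgList::writeback_iterator
               i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
          emitOneWriteback(CGF, *i);
    }

As the comment in the EmitCall hunk notes, the writebacks are emitted immediately after the call, before any return-value munging.
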
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 160a62e..343b944 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -58,9 +58,44 @@ namespace CodeGen {
class CallArgList :
public llvm::SmallVector<CallArg, 16> {
public:
+ struct Writeback {
+ /// The original argument.
+ llvm::Value *Address;
+
+ /// The pointee type of the original argument.
+ QualType AddressType;
+
+ /// The temporary alloca.
+ llvm::Value *Temporary;
+ };
+
void add(RValue rvalue, QualType type, bool needscopy = false) {
push_back(CallArg(rvalue, type, needscopy));
}
+
+ void addFrom(const CallArgList &other) {
+ insert(end(), other.begin(), other.end());
+ Writebacks.insert(Writebacks.end(),
+ other.Writebacks.begin(), other.Writebacks.end());
+ }
+
+ void addWriteback(llvm::Value *address, QualType addressType,
+ llvm::Value *temporary) {
+ Writeback writeback;
+ writeback.Address = address;
+ writeback.AddressType = addressType;
+ writeback.Temporary = temporary;
+ Writebacks.push_back(writeback);
+ }
+
+ bool hasWritebacks() const { return !Writebacks.empty(); }
+
+ typedef llvm::SmallVectorImpl<Writeback>::const_iterator writeback_iterator;
+ writeback_iterator writeback_begin() const { return Writebacks.begin(); }
+ writeback_iterator writeback_end() const { return Writebacks.end(); }
+
+ private:
+ llvm::SmallVector<Writeback, 1> Writebacks;
};
/// FunctionArgList - Type for representing both the decl and type
@@ -88,6 +123,9 @@ namespace CodeGen {
/// Whether this function is noreturn.
bool NoReturn;
+ /// Whether this function is returns-retained.
+ bool ReturnsRetained;
+
unsigned NumArgs;
ArgInfo *Args;
@@ -100,7 +138,8 @@ namespace CodeGen {
typedef ArgInfo *arg_iterator;
CGFunctionInfo(unsigned CallingConvention, bool NoReturn,
- bool HasRegParm, unsigned RegParm, CanQualType ResTy,
+ bool ReturnsRetained, bool HasRegParm, unsigned RegParm,
+ CanQualType ResTy,
const CanQualType *ArgTys, unsigned NumArgTys);
~CGFunctionInfo() { delete[] Args; }
@@ -113,6 +152,10 @@ namespace CodeGen {
bool isNoReturn() const { return NoReturn; }
+  /// In ARC, whether this function retains its return value.  This
+ /// is not always reliable for call sites.
+ bool isReturnsRetained() const { return ReturnsRetained; }
+
/// getCallingConvention - Return the user specified calling
/// convention.
unsigned getCallingConvention() const { return CallingConvention; }
@@ -137,6 +180,7 @@ namespace CodeGen {
void Profile(llvm::FoldingSetNodeID &ID) {
ID.AddInteger(getCallingConvention());
ID.AddBoolean(NoReturn);
+ ID.AddBoolean(ReturnsRetained);
ID.AddBoolean(HasRegParm);
ID.AddInteger(RegParm);
getReturnType().Profile(ID);
@@ -151,6 +195,7 @@ namespace CodeGen {
Iterator end) {
ID.AddInteger(Info.getCC());
ID.AddBoolean(Info.getNoReturn());
+ ID.AddBoolean(Info.getProducesResult());
ID.AddBoolean(Info.getHasRegParm());
ID.AddInteger(Info.getRegParm());
ResTy.Profile(ID);
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index 5725d80..7dbaaf8 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -329,7 +329,7 @@ namespace {
CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
: BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
const CXXRecordDecl *DerivedClass =
cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();
@@ -398,7 +398,8 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
BaseClassDecl,
isBaseVirtual);
- AggValueSlot AggSlot = AggValueSlot::forAddr(V, false, /*Lifetime*/ true);
+ AggValueSlot AggSlot = AggValueSlot::forAddr(V, Qualifiers(),
+ /*Lifetime*/ true);
CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);
@@ -428,10 +429,18 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
CGF.Builder.CreateStore(Next, ArrayIndexVar);
}
- AggValueSlot Slot = AggValueSlot::forAddr(Dest, LHS.isVolatileQualified(),
- /*Lifetime*/ true);
-
- CGF.EmitAggExpr(MemberInit->getInit(), Slot);
+ if (!CGF.hasAggregateLLVMType(T)) {
+ LValue lvalue = CGF.MakeAddrLValue(Dest, T);
+ CGF.EmitScalarInit(MemberInit->getInit(), /*decl*/ 0, lvalue, false);
+ } else if (T->isAnyComplexType()) {
+ CGF.EmitComplexExprIntoAddr(MemberInit->getInit(), Dest,
+ LHS.isVolatileQualified());
+ } else {
+ AggValueSlot Slot = AggValueSlot::forAddr(Dest, LHS.getQuals(),
+ /*Lifetime*/ true);
+
+ CGF.EmitAggExpr(MemberInit->getInit(), Slot);
+ }
return;
}
@@ -502,7 +511,7 @@ namespace {
CallMemberDtor(FieldDecl *Field, CXXDestructorDecl *Dtor)
: Field(Field), Dtor(Dtor) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
// FIXME: Is this OK for C++0x delegating constructors?
llvm::Value *ThisPtr = CGF.LoadCXXThis();
LValue LHS = CGF.EmitLValueForField(ThisPtr, Field, 0);
@@ -540,15 +549,15 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
// FIXME: If there's no initializer and the CXXCtorInitializer
// was implicitly generated, we shouldn't be zeroing memory.
- RValue RHS;
- if (FieldType->isReferenceType()) {
- RHS = CGF.EmitReferenceBindingToExpr(MemberInit->getInit(), Field);
- CGF.EmitStoreThroughLValue(RHS, LHS, FieldType);
- } else if (FieldType->isArrayType() && !MemberInit->getInit()) {
+ if (FieldType->isArrayType() && !MemberInit->getInit()) {
CGF.EmitNullInitialization(LHS.getAddress(), Field->getType());
} else if (!CGF.hasAggregateLLVMType(Field->getType())) {
- RHS = RValue::get(CGF.EmitScalarExpr(MemberInit->getInit()));
- CGF.EmitStoreThroughLValue(RHS, LHS, FieldType);
+ if (LHS.isSimple()) {
+ CGF.EmitExprAsInit(MemberInit->getInit(), Field, LHS, false);
+ } else {
+ RValue RHS = RValue::get(CGF.EmitScalarExpr(MemberInit->getInit()));
+ CGF.EmitStoreThroughLValue(RHS, LHS);
+ }
} else if (MemberInit->getInit()->getType()->isAnyComplexType()) {
CGF.EmitComplexExprIntoAddr(MemberInit->getInit(), LHS.getAddress(),
LHS.isVolatileQualified());
@@ -576,11 +585,11 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
CGF.Builder.CreateStore(Zero, ArrayIndexVar);
- // If we are copying an array of scalars or classes with trivial copy
+ // If we are copying an array of PODs or classes with trivial copy
// constructors, perform a single aggregate copy.
- const RecordType *Record = BaseElementTy->getAs<RecordType>();
- if (!Record ||
- cast<CXXRecordDecl>(Record->getDecl())->hasTrivialCopyConstructor()) {
+ const CXXRecordDecl *Record = BaseElementTy->getAsCXXRecordDecl();
+ if (BaseElementTy.isPODType(CGF.getContext()) ||
+ (Record && Record->hasTrivialCopyConstructor())) {
// Find the source pointer. We know it's the last argument because
// we know we're in a copy constructor.
unsigned SrcArgIndex = Args.size() - 1;
@@ -912,7 +921,7 @@ namespace {
struct CallDtorDelete : EHScopeStack::Cleanup {
CallDtorDelete() {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
const CXXRecordDecl *ClassDecl = Dtor->getParent();
CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
@@ -920,48 +929,25 @@ namespace {
}
};
- struct CallArrayFieldDtor : EHScopeStack::Cleanup {
- const FieldDecl *Field;
- CallArrayFieldDtor(const FieldDecl *Field) : Field(Field) {}
-
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
- QualType FieldType = Field->getType();
- const ConstantArrayType *Array =
- CGF.getContext().getAsConstantArrayType(FieldType);
+ class DestroyField : public EHScopeStack::Cleanup {
+ const FieldDecl *field;
+ CodeGenFunction::Destroyer &destroyer;
+ bool useEHCleanupForArray;
+
+ public:
+ DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
+ bool useEHCleanupForArray)
+ : field(field), destroyer(*destroyer),
+ useEHCleanupForArray(useEHCleanupForArray) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ // Find the address of the field.
+ llvm::Value *thisValue = CGF.LoadCXXThis();
+ LValue LV = CGF.EmitLValueForField(thisValue, field, /*CVRQualifiers=*/0);
+ assert(LV.isSimple());
- QualType BaseType =
- CGF.getContext().getBaseElementType(Array->getElementType());
- const CXXRecordDecl *FieldClassDecl = BaseType->getAsCXXRecordDecl();
-
- llvm::Value *ThisPtr = CGF.LoadCXXThis();
- LValue LHS = CGF.EmitLValueForField(ThisPtr, Field,
- // FIXME: Qualifiers?
- /*CVRQualifiers=*/0);
-
- const llvm::Type *BasePtr = CGF.ConvertType(BaseType)->getPointerTo();
- llvm::Value *BaseAddrPtr =
- CGF.Builder.CreateBitCast(LHS.getAddress(), BasePtr);
- CGF.EmitCXXAggrDestructorCall(FieldClassDecl->getDestructor(),
- Array, BaseAddrPtr);
- }
- };
-
- struct CallFieldDtor : EHScopeStack::Cleanup {
- const FieldDecl *Field;
- CallFieldDtor(const FieldDecl *Field) : Field(Field) {}
-
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
- const CXXRecordDecl *FieldClassDecl =
- Field->getType()->getAsCXXRecordDecl();
-
- llvm::Value *ThisPtr = CGF.LoadCXXThis();
- LValue LHS = CGF.EmitLValueForField(ThisPtr, Field,
- // FIXME: Qualifiers?
- /*CVRQualifiers=*/0);
-
- CGF.EmitCXXDestructorCall(FieldClassDecl->getDestructor(),
- Dtor_Complete, /*ForVirtualBase=*/false,
- LHS.getAddress());
+ CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
+ flags.isForNormalCleanup() && useEHCleanupForArray);
}
};
}
@@ -1035,96 +1021,103 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
llvm::SmallVector<const FieldDecl *, 16> FieldDecls;
for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
E = ClassDecl->field_end(); I != E; ++I) {
- const FieldDecl *Field = *I;
-
- QualType FieldType = getContext().getCanonicalType(Field->getType());
- const ConstantArrayType *Array =
- getContext().getAsConstantArrayType(FieldType);
- if (Array)
- FieldType = getContext().getBaseElementType(Array->getElementType());
-
- const RecordType *RT = FieldType->getAs<RecordType>();
- if (!RT)
- continue;
-
- CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
- if (FieldClassDecl->hasTrivialDestructor())
- continue;
-
- if (Array)
- EHStack.pushCleanup<CallArrayFieldDtor>(NormalAndEHCleanup, Field);
- else
- EHStack.pushCleanup<CallFieldDtor>(NormalAndEHCleanup, Field);
+ const FieldDecl *field = *I;
+ QualType type = field->getType();
+ QualType::DestructionKind dtorKind = type.isDestructedType();
+ if (!dtorKind) continue;
+
+ CleanupKind cleanupKind = getCleanupKind(dtorKind);
+ EHStack.pushCleanup<DestroyField>(cleanupKind, field,
+ getDestroyer(dtorKind),
+ cleanupKind & EHCleanup);
}
}
-/// EmitCXXAggrConstructorCall - This routine essentially creates a (nested)
-/// for-loop to call the default constructor on individual members of the
-/// array.
-/// 'D' is the default constructor for elements of the array, 'ArrayTy' is the
-/// array type and 'ArrayPtr' points to the beginning of the array.
-/// It is assumed that all relevant checks have been made by the caller.
+/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
+/// constructor for each of several members of an array.
///
-/// \param ZeroInitialization True if each element should be zero-initialized
-/// before it is constructed.
+/// \param ctor the constructor to call for each element
+/// \param argBegin,argEnd the arguments to evaluate and pass to the
+/// constructor
+/// \param arrayType the type of the array to initialize
+/// \param arrayBegin an arrayType*
+/// \param zeroInitialize true if each element should be
+/// zero-initialized before it is constructed
void
-CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
- const ConstantArrayType *ArrayTy,
- llvm::Value *ArrayPtr,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd,
- bool ZeroInitialization) {
-
- const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
- llvm::Value * NumElements =
- llvm::ConstantInt::get(SizeTy,
- getContext().getConstantArrayElementCount(ArrayTy));
-
- EmitCXXAggrConstructorCall(D, NumElements, ArrayPtr, ArgBeg, ArgEnd,
- ZeroInitialization);
+CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
+ const ConstantArrayType *arrayType,
+ llvm::Value *arrayBegin,
+ CallExpr::const_arg_iterator argBegin,
+ CallExpr::const_arg_iterator argEnd,
+ bool zeroInitialize) {
+ QualType elementType;
+ llvm::Value *numElements =
+ emitArrayLength(arrayType, elementType, arrayBegin);
+
+ EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin,
+ argBegin, argEnd, zeroInitialize);
}
+/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
+/// constructor for each of several members of an array.
+///
+/// \param ctor the constructor to call for each element
+/// \param numElements the number of elements in the array;
+/// may be zero
+/// \param argBegin,argEnd the arguments to evaluate and pass to the
+/// constructor
+/// \param arrayBegin a T*, where T is the type constructed by ctor
+/// \param zeroInitialize true if each element should be
+/// zero-initialized before it is constructed
void
-CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
- llvm::Value *NumElements,
- llvm::Value *ArrayPtr,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd,
- bool ZeroInitialization) {
- const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
-
- // Create a temporary for the loop index and initialize it with 0.
- llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
- llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
- Builder.CreateStore(Zero, IndexPtr);
-
- // Start the loop with a block that tests the condition.
- llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
- llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
-
- EmitBlock(CondBlock);
-
- llvm::BasicBlock *ForBody = createBasicBlock("for.body");
-
- // Generate: if (loop-index < number-of-elements fall to the loop body,
- // otherwise, go to the block after the for-loop.
- llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
- llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
- // If the condition is true, execute the body.
- Builder.CreateCondBr(IsLess, ForBody, AfterFor);
-
- EmitBlock(ForBody);
+CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
+ llvm::Value *numElements,
+ llvm::Value *arrayBegin,
+ CallExpr::const_arg_iterator argBegin,
+ CallExpr::const_arg_iterator argEnd,
+ bool zeroInitialize) {
+
+ // It's legal for numElements to be zero. This can happen both
+ // dynamically, because x can be zero in 'new A[x]', and statically,
+ // because of GCC extensions that permit zero-length arrays. There
+ // are probably legitimate places where we could assume that this
+ // doesn't happen, but it's not clear that it's worth it.
+ llvm::BranchInst *zeroCheckBranch = 0;
+
+ // Optimize for a constant count.
+ llvm::ConstantInt *constantCount
+ = dyn_cast<llvm::ConstantInt>(numElements);
+ if (constantCount) {
+ // Just skip out if the constant count is zero.
+ if (constantCount->isZero()) return;
+
+ // Otherwise, emit the check.
+ } else {
+ llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop");
+ llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty");
+ zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
+ EmitBlock(loopBB);
+ }
+
+ // Find the end of the array.
+ llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
+ "arrayctor.end");
+
+ // Enter the loop, setting up a phi for the current location to initialize.
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop");
+ EmitBlock(loopBB);
+ llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2,
+ "arrayctor.cur");
+ cur->addIncoming(arrayBegin, entryBB);
- llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
// Inside the loop body, emit the constructor call on the array element.
- Counter = Builder.CreateLoad(IndexPtr);
- llvm::Value *Address = Builder.CreateInBoundsGEP(ArrayPtr, Counter,
- "arrayidx");
+
+ QualType type = getContext().getTypeDeclType(ctor->getParent());
// Zero initialize the storage, if requested.
- if (ZeroInitialization)
- EmitNullInitialization(Address,
- getContext().getTypeDeclType(D->getParent()));
+ if (zeroInitialize)
+ EmitNullInitialization(cur, type);
// C++ [class.temporary]p4:
// There are two contexts in which temporaries are destroyed at a different
@@ -1134,99 +1127,47 @@ CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
// every temporary created in a default argument expression is sequenced
// before the construction of the next array element, if any.
- // Keep track of the current number of live temporaries.
{
RunCleanupsScope Scope(*this);
- EmitCXXConstructorCall(D, Ctor_Complete, /*ForVirtualBase=*/false, Address,
- ArgBeg, ArgEnd);
+ // Evaluate the constructor and its arguments in a regular
+ // partial-destroy cleanup.
+ if (getLangOptions().Exceptions &&
+ !ctor->getParent()->hasTrivialDestructor()) {
+ Destroyer *destroyer = destroyCXXObject;
+ pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
+ }
+
+ EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/ false,
+ cur, argBegin, argEnd);
}
- EmitBlock(ContinueBlock);
+ // Go to the next element.
+ llvm::Value *next =
+ Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1),
+ "arrayctor.next");
+ cur->addIncoming(next, Builder.GetInsertBlock());
- // Emit the increment of the loop counter.
- llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
- Counter = Builder.CreateLoad(IndexPtr);
- NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
- Builder.CreateStore(NextVal, IndexPtr);
+ // Check whether that's the end of the loop.
+ llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done");
+ llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont");
+ Builder.CreateCondBr(done, contBB, loopBB);
- // Finally, branch back up to the condition for the next iteration.
- EmitBranch(CondBlock);
+ // Patch the earlier check to skip over the loop.
+ if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB);
- // Emit the fall-through block.
- EmitBlock(AfterFor, true);
-}
-
-/// EmitCXXAggrDestructorCall - calls the default destructor on array
-/// elements in reverse order of construction.
-void
-CodeGenFunction::EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
- const ArrayType *Array,
- llvm::Value *This) {
- const ConstantArrayType *CA = dyn_cast<ConstantArrayType>(Array);
- assert(CA && "Do we support VLA for destruction ?");
- uint64_t ElementCount = getContext().getConstantArrayElementCount(CA);
-
- const llvm::Type *SizeLTy = ConvertType(getContext().getSizeType());
- llvm::Value* ElementCountPtr = llvm::ConstantInt::get(SizeLTy, ElementCount);
- EmitCXXAggrDestructorCall(D, ElementCountPtr, This);
+ EmitBlock(contBB);
}
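For orientation, the control flow this rewritten routine emits corresponds roughly to the following C++-level loop. This is only a sketch of the generated structure (the comments mirror the basic-block labels above); it is not code from the patch.

// Rough C++ analogue of the emitted IR; illustrative only.
#include <cstddef>
#include <new>

template <class T>
void constructArray(T *arrayBegin, std::size_t numElements) {
  if (numElements == 0) return;            // the "new.ctorloop" zero check
  T *arrayEnd = arrayBegin + numElements;  // "arrayctor.end"
  T *cur = arrayBegin;                     // the "arrayctor.cur" phi
  do {
    // (optional zero-initialization of *cur would go here)
    // While the constructor runs, a partial-array EH cleanup covers
    // [arrayBegin, cur) so already-built elements are destroyed on throw.
    ::new (static_cast<void *>(cur)) T();
  } while (++cur != arrayEnd);             // "arrayctor.done" -> "arrayctor.cont"
}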
-/// EmitCXXAggrDestructorCall - calls the default destructor on array
-/// elements in reverse order of construction.
-void
-CodeGenFunction::EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
- llvm::Value *UpperCount,
- llvm::Value *This) {
- const llvm::Type *SizeLTy = ConvertType(getContext().getSizeType());
- llvm::Value *One = llvm::ConstantInt::get(SizeLTy, 1);
-
- // Create a temporary for the loop index and initialize it with count of
- // array elements.
- llvm::Value *IndexPtr = CreateTempAlloca(SizeLTy, "loop.index");
-
- // Store the number of elements in the index pointer.
- Builder.CreateStore(UpperCount, IndexPtr);
-
- // Start the loop with a block that tests the condition.
- llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
- llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
-
- EmitBlock(CondBlock);
-
- llvm::BasicBlock *ForBody = createBasicBlock("for.body");
-
- // Generate: if (loop-index != 0 fall to the loop body,
- // otherwise, go to the block after the for-loop.
- llvm::Value* zeroConstant =
- llvm::Constant::getNullValue(SizeLTy);
- llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
- llvm::Value *IsNE = Builder.CreateICmpNE(Counter, zeroConstant,
- "isne");
- // If the condition is true, execute the body.
- Builder.CreateCondBr(IsNE, ForBody, AfterFor);
-
- EmitBlock(ForBody);
-
- llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
- // Inside the loop body, emit the constructor call on the array element.
- Counter = Builder.CreateLoad(IndexPtr);
- Counter = Builder.CreateSub(Counter, One);
- llvm::Value *Address = Builder.CreateInBoundsGEP(This, Counter, "arrayidx");
- EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false, Address);
-
- EmitBlock(ContinueBlock);
-
- // Emit the decrement of the loop counter.
- Counter = Builder.CreateLoad(IndexPtr);
- Counter = Builder.CreateSub(Counter, One, "dec");
- Builder.CreateStore(Counter, IndexPtr);
-
- // Finally, branch back up to the condition for the next iteration.
- EmitBranch(CondBlock);
-
- // Emit the fall-through block.
- EmitBlock(AfterFor, true);
+void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ QualType type) {
+ const RecordType *rtype = type->castAs<RecordType>();
+ const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
+ const CXXDestructorDecl *dtor = record->getDestructor();
+ assert(!dtor->isTrivial());
+ CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
+ addr);
}
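Because destroyCXXObject has the Destroyer signature, it can be handed directly to the pushDestroy/emitDestroy entry points introduced later in this patch. A minimal usage sketch, assuming it sits inside some CodeGenFunction member with an object address 'addr' of class type 'ty' (those two names are illustrative, not from the patch):

// Sketch only: destroy the object at 'addr' on both the normal and EH
// paths, giving array elements their own EH cleanup while they are
// being destroyed.
pushDestroy(NormalAndEHCleanup, addr, ty,
            CodeGenFunction::destroyCXXObject,
            /*useEHCleanupForArray*/ true);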
void
@@ -1370,7 +1311,7 @@ namespace {
CXXDtorType Type)
: Dtor(D), Addr(Addr), Type(Type) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
Addr);
}
@@ -1384,7 +1325,8 @@ CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor
llvm::Value *ThisPtr = LoadCXXThis();
- AggValueSlot AggSlot = AggValueSlot::forAddr(ThisPtr, false, /*Lifetime*/ true);
+ AggValueSlot AggSlot =
+ AggValueSlot::forAddr(ThisPtr, Qualifiers(), /*Lifetime*/ true);
EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);
@@ -1424,7 +1366,7 @@ namespace {
CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
: Dtor(D), Addr(Addr) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
/*ForVirtualBase=*/false, Addr);
}
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index 41ecd81..9c5dd1f 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -49,8 +49,7 @@ DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
if (rv.isComplex()) {
CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
const llvm::Type *ComplexTy =
- llvm::StructType::get(CGF.getLLVMContext(),
- V.first->getType(), V.second->getType(),
+ llvm::StructType::get(V.first->getType(), V.second->getType(),
(void*) 0);
llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
CGF.StoreComplexToAddr(V, addr, /*volatile*/ false);
@@ -257,9 +256,7 @@ void CodeGenFunction::initFullExprCleanup() {
if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup();
}
-EHScopeStack::Cleanup::~Cleanup() {
- llvm_unreachable("Cleanup is indestructable");
-}
+void EHScopeStack::Cleanup::anchor() {}
/// All the branch fixups on the EH stack have propagated out past the
/// outermost normal cleanup; resolve them all by adding cases to the
@@ -421,13 +418,13 @@ static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
// Kill the branch.
Br->eraseFromParent();
- // Merge the blocks.
- Pred->getInstList().splice(Pred->end(), Entry->getInstList());
-
// Replace all uses of the entry with the predecessor, in case there
// are phis in the cleanup.
Entry->replaceAllUsesWith(Pred);
+ // Merge the blocks.
+ Pred->getInstList().splice(Pred->end(), Entry->getInstList());
+
// Kill the entry block.
Entry->eraseFromParent();
@@ -439,10 +436,10 @@ static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
static void EmitCleanup(CodeGenFunction &CGF,
EHScopeStack::Cleanup *Fn,
- bool ForEH,
+ EHScopeStack::Cleanup::Flags flags,
llvm::Value *ActiveFlag) {
// EH cleanups always occur within a terminate scope.
- if (ForEH) CGF.EHStack.pushTerminate();
+ if (flags.isForEHCleanup()) CGF.EHStack.pushTerminate();
// If there's an active flag, load it and skip the cleanup if it's
// false.
@@ -457,7 +454,7 @@ static void EmitCleanup(CodeGenFunction &CGF,
}
// Ask the cleanup to emit itself.
- Fn->Emit(CGF, ForEH);
+ Fn->Emit(CGF, flags);
assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
// Emit the continuation block if there was an active flag.
@@ -465,7 +462,7 @@ static void EmitCleanup(CodeGenFunction &CGF,
CGF.EmitBlock(ContBB);
// Leave the terminate scope.
- if (ForEH) CGF.EHStack.popTerminate();
+ if (flags.isForEHCleanup()) CGF.EHStack.popTerminate();
}
static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
@@ -540,6 +537,12 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
RequiresNormalCleanup = true;
}
+ EHScopeStack::Cleanup::Flags cleanupFlags;
+ if (Scope.isNormalCleanup())
+ cleanupFlags.setIsNormalCleanupKind();
+ if (Scope.isEHCleanup())
+ cleanupFlags.setIsEHCleanupKind();
+
// Even if we don't need the normal cleanup, we might still have
// prebranched fallthrough to worry about.
if (Scope.isNormalCleanup() && !RequiresNormalCleanup &&
@@ -663,7 +666,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
EHStack.popCleanup();
- EmitCleanup(*this, Fn, /*ForEH*/ false, NormalActiveFlag);
+ EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);
// Otherwise, the best approach is to thread everything through
// the cleanup block and then try to clean up after ourselves.
@@ -774,7 +777,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
EHStack.popCleanup();
assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);
- EmitCleanup(*this, Fn, /*ForEH*/ false, NormalActiveFlag);
+ EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);
// Append the prepared cleanup prologue from above.
llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
@@ -857,7 +860,9 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
EmitBlock(EHEntry);
- EmitCleanup(*this, Fn, /*ForEH*/ true, EHActiveFlag);
+
+ cleanupFlags.setIsForEHCleanup();
+ EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
// Append the prepared cleanup prologue from above.
llvm::BasicBlock *EHExit = Builder.GetInsertBlock();
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index 98d30db..4c12445 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -314,8 +314,9 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
llvm::SmallVector<llvm::Value *, 16> EltTys;
llvm::DIType FieldTy =
- DBuilder.createMemberType("isa", getOrCreateMainFile(),
- 0,Size, 0, 0, 0, ISATy);
+ DBuilder.createMemberType(getOrCreateMainFile(), "isa",
+ getOrCreateMainFile(), 0, Size,
+ 0, 0, 0, ISATy);
EltTys.push_back(FieldTy);
llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
@@ -389,6 +390,7 @@ llvm::DIType CGDebugInfo::CreateQualifiedType(QualType Ty, llvm::DIFile Unit) {
// Ignore these qualifiers for now.
Qc.removeObjCGCAttr();
Qc.removeAddressSpace();
+ Qc.removeObjCLifetime();
// We will create one Derived type for one qualifier and recurse to handle any
// additional ones.
@@ -528,7 +530,7 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
FieldTy = DescTy;
FieldSize = CGM.getContext().getTypeSize(Ty);
FieldAlign = CGM.getContext().getTypeAlign(Ty);
- FieldTy = DBuilder.createMemberType("__descriptor", Unit,
+ FieldTy = DBuilder.createMemberType(Unit, "__descriptor", Unit,
LineNo, FieldSize, FieldAlign,
FieldOffset, 0, FieldTy);
EltTys.push_back(FieldTy);
@@ -591,7 +593,8 @@ llvm::DIType CGDebugInfo::createFieldType(llvm::StringRef name,
SourceLocation loc,
AccessSpecifier AS,
uint64_t offsetInBits,
- llvm::DIFile tunit) {
+ llvm::DIFile tunit,
+ llvm::DIDescriptor scope) {
llvm::DIType debugType = getOrCreateType(type, tunit);
// Get the location for the field.
@@ -613,15 +616,16 @@ llvm::DIType CGDebugInfo::createFieldType(llvm::StringRef name,
else if (AS == clang::AS_protected)
flags |= llvm::DIDescriptor::FlagProtected;
- return DBuilder.createMemberType(name, file, line, sizeInBits, alignInBits,
- offsetInBits, flags, debugType);
+ return DBuilder.createMemberType(scope, name, file, line, sizeInBits,
+ alignInBits, offsetInBits, flags, debugType);
}
/// CollectRecordFields - A helper function to collect debug info for
/// record fields. This is used while creating debug info entry for a Record.
void CGDebugInfo::
CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
- llvm::SmallVectorImpl<llvm::Value *> &elements) {
+ llvm::SmallVectorImpl<llvm::Value *> &elements,
+ llvm::DIType RecordTy) {
unsigned fieldNo = 0;
const FieldDecl *LastFD = 0;
bool IsMsStruct = record->hasAttr<MsStructAttr>();
@@ -652,7 +656,7 @@ CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
llvm::DIType fieldType
= createFieldType(name, type, field->getBitWidth(),
field->getLocation(), field->getAccess(),
- layout.getFieldOffset(fieldNo), tunit);
+ layout.getFieldOffset(fieldNo), tunit, RecordTy);
elements.push_back(fieldType);
}
@@ -960,7 +964,7 @@ CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
llvm::DIType VPTR
- = DBuilder.createMemberType(getVTableName(RD), Unit,
+ = DBuilder.createMemberType(Unit, getVTableName(RD), Unit,
0, Size, 0, 0, 0,
getOrCreateVTablePtrType(Unit));
EltTys.push_back(VPTR);
@@ -1048,7 +1052,7 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
}
}
- CollectRecordFields(RD, Unit, EltTys);
+ CollectRecordFields(RD, Unit, EltTys, FwdDecl);
llvm::DIArray TParamsArray;
if (CXXDecl) {
CollectCXXMemberFunctions(CXXDecl, Unit, EltTys, FwdDecl);
@@ -1379,13 +1383,13 @@ llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
// FIXME: This should probably be a function type instead.
ElementTypes[0] =
- DBuilder.createMemberType("ptr", U, 0,
+ DBuilder.createMemberType(U, "ptr", U, 0,
Info.first, Info.second, FieldOffset, 0,
PointerDiffDITy);
FieldOffset += Info.first;
ElementTypes[1] =
- DBuilder.createMemberType("ptr", U, 0,
+ DBuilder.createMemberType(U, "ptr", U, 0,
Info.first, Info.second, FieldOffset, 0,
PointerDiffDITy);
@@ -1520,10 +1524,7 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
#include "clang/AST/TypeNodes.def"
assert(false && "Dependent types cannot show up in debug information");
- // FIXME: Handle these.
case Type::ExtVector:
- return llvm::DIType();
-
case Type::Vector:
return CreateType(cast<VectorType>(Ty), Unit);
case Type::ObjCObjectPointer:
@@ -1586,7 +1587,7 @@ llvm::DIType CGDebugInfo::CreateMemberType(llvm::DIFile Unit, QualType FType,
llvm::DIType FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
uint64_t FieldSize = CGM.getContext().getTypeSize(FType);
unsigned FieldAlign = CGM.getContext().getTypeAlign(FType);
- llvm::DIType Ty = DBuilder.createMemberType(Name, Unit, 0,
+ llvm::DIType Ty = DBuilder.createMemberType(Unit, Name, Unit, 0,
FieldSize, FieldAlign,
*Offset, 0, FieldTy);
*Offset += FieldSize;
@@ -1897,7 +1898,7 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
FieldAlign = CGM.getContext().toBits(Align);
*XOffset = FieldOffset;
- FieldTy = DBuilder.createMemberType(VD->getName(), Unit,
+ FieldTy = DBuilder.createMemberType(Unit, VD->getName(), Unit,
0, FieldSize, FieldAlign,
FieldOffset, 0, FieldTy);
EltTys.push_back(FieldTy);
@@ -2133,23 +2134,23 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
llvm::SmallVector<llvm::Value*, 16> fields;
fields.push_back(createFieldType("__isa", C.VoidPtrTy, 0, loc, AS_public,
blockLayout->getElementOffsetInBits(0),
- tunit));
+ tunit, tunit));
fields.push_back(createFieldType("__flags", C.IntTy, 0, loc, AS_public,
blockLayout->getElementOffsetInBits(1),
- tunit));
+ tunit, tunit));
fields.push_back(createFieldType("__reserved", C.IntTy, 0, loc, AS_public,
blockLayout->getElementOffsetInBits(2),
- tunit));
+ tunit, tunit));
fields.push_back(createFieldType("__FuncPtr", C.VoidPtrTy, 0, loc, AS_public,
blockLayout->getElementOffsetInBits(3),
- tunit));
+ tunit, tunit));
fields.push_back(createFieldType("__descriptor",
C.getPointerType(block.NeedsCopyDispose ?
C.getBlockDescriptorExtendedType() :
C.getBlockDescriptorType()),
0, loc, AS_public,
blockLayout->getElementOffsetInBits(4),
- tunit));
+ tunit, tunit));
// We want to sort the captures by offset, not because DWARF
// requires this, but because we're paranoid about debuggers.
@@ -2198,7 +2199,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
QualType type = method->getThisType(C);
fields.push_back(createFieldType("this", type, 0, loc, AS_public,
- offsetInBits, tunit));
+ offsetInBits, tunit, tunit));
continue;
}
@@ -2213,12 +2214,12 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
uint64_t xoffset;
fieldType = EmitTypeForVarWithBlocksAttr(variable, &xoffset);
fieldType = DBuilder.createPointerType(fieldType, ptrInfo.first);
- fieldType = DBuilder.createMemberType(name, tunit, line,
+ fieldType = DBuilder.createMemberType(tunit, name, tunit, line,
ptrInfo.first, ptrInfo.second,
offsetInBits, 0, fieldType);
} else {
fieldType = createFieldType(name, variable->getType(), 0,
- loc, AS_public, offsetInBits, tunit);
+ loc, AS_public, offsetInBits, tunit, tunit);
}
fields.push_back(fieldType);
}
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index 6ec6b65..f87d007 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -33,11 +33,11 @@ namespace clang {
class VarDecl;
class ObjCInterfaceDecl;
class ClassTemplateSpecializationDecl;
+ class GlobalDecl;
namespace CodeGen {
class CodeGenModule;
class CodeGenFunction;
- class GlobalDecl;
class CGBlockInfo;
/// CGDebugInfo - This class gathers all debug information during compilation
@@ -139,9 +139,11 @@ class CGDebugInfo {
llvm::DIType createFieldType(llvm::StringRef name, QualType type,
Expr *bitWidth, SourceLocation loc,
AccessSpecifier AS, uint64_t offsetInBits,
- llvm::DIFile tunit);
+ llvm::DIFile tunit,
+ llvm::DIDescriptor scope);
void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile F,
- llvm::SmallVectorImpl<llvm::Value *> &E);
+ llvm::SmallVectorImpl<llvm::Value *> &E,
+ llvm::DIType RecordTy);
void CollectVTableInfo(const CXXRecordDecl *Decl,
llvm::DIFile F,
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 8a1a853..62c3a97 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -98,7 +98,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
QualType Ty = TD.getUnderlyingType();
if (Ty->isVariablyModifiedType())
- EmitVLASize(Ty);
+ EmitVariablyModifiedType(Ty);
}
}
}
@@ -258,7 +258,7 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
// even though that doesn't really make any sense.
// Make sure to evaluate VLA bounds now so that we have them for later.
if (D.getType()->isVariablyModifiedType())
- EmitVLASize(D.getType());
+ EmitVariablyModifiedType(D.getType());
// Local static block variables must be treated as globals as they may be
// referenced in their RHS initializer block-literal expresion.
@@ -304,38 +304,40 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
}
namespace {
- struct CallArrayDtor : EHScopeStack::Cleanup {
- CallArrayDtor(const CXXDestructorDecl *Dtor,
- const ConstantArrayType *Type,
- llvm::Value *Loc)
- : Dtor(Dtor), Type(Type), Loc(Loc) {}
-
- const CXXDestructorDecl *Dtor;
- const ConstantArrayType *Type;
- llvm::Value *Loc;
-
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
- QualType BaseElementTy = CGF.getContext().getBaseElementType(Type);
- const llvm::Type *BasePtr = CGF.ConvertType(BaseElementTy);
- BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr = CGF.Builder.CreateBitCast(Loc, BasePtr);
- CGF.EmitCXXAggrDestructorCall(Dtor, Type, BaseAddrPtr);
+ struct DestroyObject : EHScopeStack::Cleanup {
+ DestroyObject(llvm::Value *addr, QualType type,
+ CodeGenFunction::Destroyer *destroyer,
+ bool useEHCleanupForArray)
+ : addr(addr), type(type), destroyer(*destroyer),
+ useEHCleanupForArray(useEHCleanupForArray) {}
+
+ llvm::Value *addr;
+ QualType type;
+ CodeGenFunction::Destroyer &destroyer;
+ bool useEHCleanupForArray;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ // Don't use an EH cleanup recursively from an EH cleanup.
+ bool useEHCleanupForArray =
+ flags.isForNormalCleanup() && this->useEHCleanupForArray;
+
+ CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
}
};
- struct CallVarDtor : EHScopeStack::Cleanup {
- CallVarDtor(const CXXDestructorDecl *Dtor,
- llvm::Value *NRVOFlag,
- llvm::Value *Loc)
- : Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(Loc) {}
+ struct DestroyNRVOVariable : EHScopeStack::Cleanup {
+ DestroyNRVOVariable(llvm::Value *addr,
+ const CXXDestructorDecl *Dtor,
+ llvm::Value *NRVOFlag)
+ : Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(addr) {}
const CXXDestructorDecl *Dtor;
llvm::Value *NRVOFlag;
llvm::Value *Loc;
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
// Along the exceptions path we always execute the dtor.
- bool NRVO = !IsForEH && NRVOFlag;
+ bool NRVO = flags.isForNormalCleanup() && NRVOFlag;
llvm::BasicBlock *SkipDtorBB = 0;
if (NRVO) {
@@ -353,19 +355,31 @@ namespace {
if (NRVO) CGF.EmitBlock(SkipDtorBB);
}
};
-}
-namespace {
struct CallStackRestore : EHScopeStack::Cleanup {
llvm::Value *Stack;
CallStackRestore(llvm::Value *Stack) : Stack(Stack) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
llvm::Value *V = CGF.Builder.CreateLoad(Stack, "tmp");
llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
CGF.Builder.CreateCall(F, V);
}
};
+ struct ExtendGCLifetime : EHScopeStack::Cleanup {
+ const VarDecl &Var;
+ ExtendGCLifetime(const VarDecl *var) : Var(*var) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ // Compute the address of the local variable, in case it's a
+ // byref or something.
+ DeclRefExpr DRE(const_cast<VarDecl*>(&Var), Var.getType(), VK_LValue,
+ SourceLocation());
+ llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE));
+ CGF.EmitExtendGCLifetime(value);
+ }
+ };
+
struct CallCleanupFunction : EHScopeStack::Cleanup {
llvm::Constant *CleanupFn;
const CGFunctionInfo &FnInfo;
@@ -375,7 +389,7 @@ namespace {
const VarDecl *Var)
: CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
DeclRefExpr DRE(const_cast<VarDecl*>(&Var), Var.getType(), VK_LValue,
SourceLocation());
// Compute the address of the local variable, in case it's a byref
@@ -400,6 +414,207 @@ namespace {
};
}
+/// EmitAutoVarWithLifetime - Does the setup required for an automatic
+/// variable with lifetime.
+static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
+ llvm::Value *addr,
+ Qualifiers::ObjCLifetime lifetime) {
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing to do
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ CodeGenFunction::Destroyer &destroyer =
+ (var.hasAttr<ObjCPreciseLifetimeAttr>()
+ ? CodeGenFunction::destroyARCStrongPrecise
+ : CodeGenFunction::destroyARCStrongImprecise);
+
+ CleanupKind cleanupKind = CGF.getARCCleanupKind();
+ CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
+ cleanupKind & EHCleanup);
+ break;
+ }
+ case Qualifiers::OCL_Autoreleasing:
+ // nothing to do
+ break;
+
+ case Qualifiers::OCL_Weak:
+ // __weak objects always get EH cleanups; otherwise, exceptions
+ // could cause really nasty crashes instead of mere leaks.
+ CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
+ CodeGenFunction::destroyARCWeak,
+ /*useEHCleanup*/ true);
+ break;
+ }
+}
+
+static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
+ if (const Expr *e = dyn_cast<Expr>(s)) {
+ // Skip the most common kinds of expressions that make
+ // hierarchy-walking expensive.
+ s = e = e->IgnoreParenCasts();
+
+ if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
+ return (ref->getDecl() == &var);
+ }
+
+ for (Stmt::const_child_range children = s->children(); children; ++children)
+ // children might be null; as in missing decl or conditional of an if-stmt.
+ if ((*children) && isAccessedBy(var, *children))
+ return true;
+
+ return false;
+}
+
+static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
+ if (!decl) return false;
+ if (!isa<VarDecl>(decl)) return false;
+ const VarDecl *var = cast<VarDecl>(decl);
+ return isAccessedBy(*var, e);
+}
+
+static void drillIntoBlockVariable(CodeGenFunction &CGF,
+ LValue &lvalue,
+ const VarDecl *var) {
+ lvalue.setAddress(CGF.BuildBlockByrefAddress(lvalue.getAddress(), var));
+}
+
+void CodeGenFunction::EmitScalarInit(const Expr *init,
+ const ValueDecl *D,
+ LValue lvalue,
+ bool capturedByInit) {
+ Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
+ if (!lifetime) {
+ llvm::Value *value = EmitScalarExpr(init);
+ if (capturedByInit)
+ drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ EmitStoreThroughLValue(RValue::get(value), lvalue);
+ return;
+ }
+
+ // If we're emitting a value with lifetime, we have to do the
+ // initialization *before* we leave the cleanup scopes.
+ CodeGenFunction::RunCleanupsScope Scope(*this);
+ if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(init))
+ init = ewc->getSubExpr();
+
+ // We have to maintain the illusion that the variable is
+ // zero-initialized. If the variable might be accessed in its
+ // initializer, zero-initialize before running the initializer, then
+ // actually perform the initialization with an assign.
+ bool accessedByInit = false;
+ if (lifetime != Qualifiers::OCL_ExplicitNone)
+ accessedByInit = isAccessedBy(D, init);
+ if (accessedByInit) {
+ LValue tempLV = lvalue;
+ // Drill down to the __block object if necessary.
+ if (capturedByInit) {
+ // We can use a simple GEP for this because it can't have been
+ // moved yet.
+ tempLV.setAddress(Builder.CreateStructGEP(tempLV.getAddress(),
+ getByRefValueLLVMField(cast<VarDecl>(D))));
+ }
+
+ const llvm::PointerType *ty
+ = cast<llvm::PointerType>(tempLV.getAddress()->getType());
+ ty = cast<llvm::PointerType>(ty->getElementType());
+
+ llvm::Value *zero = llvm::ConstantPointerNull::get(ty);
+
+ // If __weak, we want to use a barrier under certain conditions.
+ if (lifetime == Qualifiers::OCL_Weak)
+ EmitARCInitWeak(tempLV.getAddress(), zero);
+
+ // Otherwise just do a simple store.
+ else
+ EmitStoreOfScalar(zero, tempLV);
+ }
+
+ // Emit the initializer.
+ llvm::Value *value = 0;
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing to do
+ value = EmitScalarExpr(init);
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ value = EmitARCRetainScalarExpr(init);
+ break;
+ }
+
+ case Qualifiers::OCL_Weak: {
+ // No way to optimize a producing initializer into this. It's not
+ // worth optimizing for, because the value will immediately
+ // disappear in the common case.
+ value = EmitScalarExpr(init);
+
+ if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ if (accessedByInit)
+ EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true);
+ else
+ EmitARCInitWeak(lvalue.getAddress(), value);
+ return;
+ }
+
+ case Qualifiers::OCL_Autoreleasing:
+ value = EmitARCRetainAutoreleaseScalarExpr(init);
+ break;
+ }
+
+ if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+
+ // If the variable might have been accessed by its initializer, we
+ // might have to initialize with a barrier. We have to do this for
+ // both __weak and __strong, but __weak got filtered out above.
+ if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
+ llvm::Value *oldValue = EmitLoadOfScalar(lvalue);
+ EmitStoreOfScalar(value, lvalue);
+ EmitARCRelease(oldValue, /*precise*/ false);
+ return;
+ }
+
+ EmitStoreOfScalar(value, lvalue);
+}
+
+/// EmitScalarInit - Initialize the given lvalue with the given object.
+void CodeGenFunction::EmitScalarInit(llvm::Value *init, LValue lvalue) {
+ Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
+ if (!lifetime)
+ return EmitStoreThroughLValue(RValue::get(init), lvalue);
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing to do
+ break;
+
+ case Qualifiers::OCL_Strong:
+ init = EmitARCRetain(lvalue.getType(), init);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ // Initialize and then skip the primitive store.
+ EmitARCInitWeak(lvalue.getAddress(), init);
+ return;
+
+ case Qualifiers::OCL_Autoreleasing:
+ init = EmitARCRetainAutorelease(lvalue.getType(), init);
+ break;
+ }
+
+ EmitStoreOfScalar(init, lvalue);
+}
/// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
/// non-zero parts of the specified initializer with equal or fewer than
@@ -508,6 +723,10 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
CharUnits alignment = getContext().getDeclAlign(&D);
emission.Alignment = alignment;
+ // If the type is variably-modified, emit all the VLA sizes for it.
+ if (Ty->isVariablyModifiedType())
+ EmitVariablyModifiedType(Ty);
+
llvm::Value *DeclPtr;
if (Ty->isConstantSizeType()) {
if (!Target.useGlobalsForAutomaticVariables()) {
@@ -521,7 +740,9 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// arrays as long as the initialization is trivial (e.g. if they
// have a non-trivial destructor, but not a non-trivial constructor).
if (D.getInit() &&
- (Ty->isArrayType() || Ty->isRecordType()) && Ty->isPODType() &&
+ (Ty->isArrayType() || Ty->isRecordType()) &&
+ (Ty.isPODType(getContext()) ||
+ getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
D.getInit()->isConstantInitializer(getContext(), false)) {
// If the variable's a const type, and it's neither an NRVO
@@ -585,10 +806,6 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
DeclPtr = CreateStaticVarDecl(D, Class,
llvm::GlobalValue::InternalLinkage);
}
-
- // FIXME: Can this happen?
- if (Ty->isVariablyModifiedType())
- EmitVLASize(Ty);
} else {
EnsureInsertPoint();
@@ -608,19 +825,17 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
EHStack.pushCleanup<CallStackRestore>(NormalCleanup, Stack);
}
- // Get the element type.
- const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
- const llvm::Type *LElemPtrTy =
- LElemTy->getPointerTo(CGM.getContext().getTargetAddressSpace(Ty));
+ llvm::Value *elementCount;
+ QualType elementType;
+ llvm::tie(elementCount, elementType) = getVLASize(Ty);
- llvm::Value *VLASize = EmitVLASize(Ty);
+ const llvm::Type *llvmTy = ConvertTypeForMem(elementType);
// Allocate memory for the array.
- llvm::AllocaInst *VLA =
- Builder.CreateAlloca(llvm::Type::getInt8Ty(getLLVMContext()), VLASize, "vla");
- VLA->setAlignment(alignment.getQuantity());
+ llvm::AllocaInst *vla = Builder.CreateAlloca(llvmTy, elementCount, "vla");
+ vla->setAlignment(alignment.getQuantity());
- DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
+ DeclPtr = vla;
}
llvm::Value *&DMEntry = LocalDeclMap[&D];
@@ -667,6 +882,21 @@ static bool isCapturedBy(const VarDecl &var, const Expr *e) {
return false;
}
+/// \brief Determine whether the given initializer is trivial in the sense
+/// that it requires no code to be generated.
+static bool isTrivialInitializer(const Expr *Init) {
+ if (!Init)
+ return true;
+
+ if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
+ if (CXXConstructorDecl *Constructor = Construct->getConstructor())
+ if (Constructor->isTrivial() &&
+ Constructor->isDefaultConstructor() &&
+ !Construct->requiresZeroInitialization())
+ return true;
+
+ return false;
+}
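A small illustration of what this predicate is meant to catch (a sketch, not taken from the patch): a local of class type with a trivial default constructor. Depending on how Sema models the declaration, getInit() is either null or a CXXConstructExpr for the trivial default constructor; isTrivialInitializer returns true in both cases, so EmitAutoVarInit emits nothing for it.

// Illustrative source-level example; assumed, not part of the patch.
struct Widget { int id; };   // trivial, non-zero-initializing default ctor
void g() {
  Widget w;                  // default-initialized: either no init node or a
                             // trivial CXXConstructExpr -> no code is emitted
  (void) w;
}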
void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
assert(emission.Variable && "emission was not valid!");
@@ -690,7 +920,9 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
if (emission.IsByRef)
emitByrefStructureInit(emission);
- if (!Init) return;
+ if (isTrivialInitializer(Init))
+ return;
+
CharUnits alignment = emission.Alignment;
@@ -702,8 +934,11 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
llvm::Value *Loc =
capturedByInit ? emission.Address : emission.getObjectAddress(*this);
- if (!emission.IsConstantAggregate)
- return EmitExprAsInit(Init, &D, Loc, alignment, capturedByInit);
+ if (!emission.IsConstantAggregate) {
+ LValue lv = MakeAddrLValue(Loc, type, alignment.getQuantity());
+ lv.setNonGC(true);
+ return EmitExprAsInit(Init, &D, lv, capturedByInit);
+ }
// If this is a simple aggregate initialization, we can optimize it
// in various ways.
@@ -765,32 +1000,87 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
/// \param capturedByInit true if the variable is a __block variable
/// whose address is potentially changed by the initializer
void CodeGenFunction::EmitExprAsInit(const Expr *init,
- const VarDecl *var,
- llvm::Value *loc,
- CharUnits alignment,
+ const ValueDecl *D,
+ LValue lvalue,
bool capturedByInit) {
- QualType type = var->getType();
- bool isVolatile = type.isVolatileQualified();
+ QualType type = D->getType();
if (type->isReferenceType()) {
- RValue RV = EmitReferenceBindingToExpr(init, var);
- if (capturedByInit) loc = BuildBlockByrefAddress(loc, var);
- EmitStoreOfScalar(RV.getScalarVal(), loc, false,
- alignment.getQuantity(), type);
+ RValue rvalue = EmitReferenceBindingToExpr(init, D);
+ if (capturedByInit)
+ drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ EmitStoreThroughLValue(rvalue, lvalue);
} else if (!hasAggregateLLVMType(type)) {
- llvm::Value *V = EmitScalarExpr(init);
- if (capturedByInit) loc = BuildBlockByrefAddress(loc, var);
- EmitStoreOfScalar(V, loc, isVolatile, alignment.getQuantity(), type);
+ EmitScalarInit(init, D, lvalue, capturedByInit);
} else if (type->isAnyComplexType()) {
ComplexPairTy complex = EmitComplexExpr(init);
- if (capturedByInit) loc = BuildBlockByrefAddress(loc, var);
- StoreComplexToAddr(complex, loc, isVolatile);
+ if (capturedByInit)
+ drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ StoreComplexToAddr(complex, lvalue.getAddress(), lvalue.isVolatile());
} else {
// TODO: how can we delay here if D is captured by its initializer?
- EmitAggExpr(init, AggValueSlot::forAddr(loc, isVolatile, true, false));
+ EmitAggExpr(init, AggValueSlot::forLValue(lvalue, true, false));
}
}
+/// Enter a destroy cleanup for the given local variable.
+void CodeGenFunction::emitAutoVarTypeCleanup(
+ const CodeGenFunction::AutoVarEmission &emission,
+ QualType::DestructionKind dtorKind) {
+ assert(dtorKind != QualType::DK_none);
+
+ // Note that for __block variables, we want to destroy the
+ // original stack object, not the possibly forwarded object.
+ llvm::Value *addr = emission.getObjectAddress(*this);
+
+ const VarDecl *var = emission.Variable;
+ QualType type = var->getType();
+
+ CleanupKind cleanupKind = NormalAndEHCleanup;
+ CodeGenFunction::Destroyer *destroyer = 0;
+
+ switch (dtorKind) {
+ case QualType::DK_none:
+ llvm_unreachable("no cleanup for trivially-destructible variable");
+
+ case QualType::DK_cxx_destructor:
+ // If there's an NRVO flag on the emission, we need a different
+ // cleanup.
+ if (emission.NRVOFlag) {
+ assert(!type->isArrayType());
+ CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
+ EHStack.pushCleanup<DestroyNRVOVariable>(cleanupKind, addr, dtor,
+ emission.NRVOFlag);
+ return;
+ }
+ break;
+
+ case QualType::DK_objc_strong_lifetime:
+ // Suppress cleanups for pseudo-strong variables.
+ if (var->isARCPseudoStrong()) return;
+
+ // Otherwise, consider whether to use an EH cleanup or not.
+ cleanupKind = getARCCleanupKind();
+
+ // Use the imprecise destroyer by default.
+ if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
+ destroyer = CodeGenFunction::destroyARCStrongImprecise;
+ break;
+
+ case QualType::DK_objc_weak_lifetime:
+ break;
+ }
+
+ // If we haven't chosen a more specific destroyer, use the default.
+ if (!destroyer) destroyer = &getDestroyer(dtorKind);
+
+ // Use an EH cleanup in array destructors iff the destructor itself
+ // is being pushed as an EH cleanup.
+ bool useEHCleanup = (cleanupKind & EHCleanup);
+ EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
+ useEHCleanup);
+}
+
void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
assert(emission.Variable && "emission was not valid!");
@@ -799,35 +1089,14 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
const VarDecl &D = *emission.Variable;
- // Handle C++ destruction of variables.
- if (getLangOptions().CPlusPlus) {
- QualType type = D.getType();
- QualType baseType = getContext().getBaseElementType(type);
- if (const RecordType *RT = baseType->getAs<RecordType>()) {
- CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
- if (!ClassDecl->hasTrivialDestructor()) {
- // Note: We suppress the destructor call when the corresponding NRVO
- // flag has been set.
-
- // Note that for __block variables, we want to destroy the
- // original stack object, not the possible forwarded object.
- llvm::Value *Loc = emission.getObjectAddress(*this);
-
- const CXXDestructorDecl *D = ClassDecl->getDestructor();
- assert(D && "EmitLocalBlockVarDecl - destructor is nul");
-
- if (type != baseType) {
- const ConstantArrayType *Array =
- getContext().getAsConstantArrayType(type);
- assert(Array && "types changed without array?");
- EHStack.pushCleanup<CallArrayDtor>(NormalAndEHCleanup,
- D, Array, Loc);
- } else {
- EHStack.pushCleanup<CallVarDtor>(NormalAndEHCleanup,
- D, emission.NRVOFlag, Loc);
- }
- }
- }
+ // Check the type for a cleanup.
+ if (QualType::DestructionKind dtorKind = D.getType().isDestructedType())
+ emitAutoVarTypeCleanup(emission, dtorKind);
+
+ // In GC mode, honor objc_precise_lifetime.
+ if (getLangOptions().getGCMode() != LangOptions::NonGC &&
+ D.hasAttr<ObjCPreciseLifetimeAttr>()) {
+ EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
}
// Handle the cleanup attribute.
@@ -847,6 +1116,271 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
enterByrefCleanup(emission);
}
+CodeGenFunction::Destroyer &
+CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
+ // This is surprisingly compiler-dependent. GCC 4.2 can't bind
+ // references to functions directly in returns, and using '*&foo'
+ // confuses MSVC. Luckily, the following code pattern works in both.
+ Destroyer *destroyer = 0;
+ switch (kind) {
+ case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
+ case QualType::DK_cxx_destructor:
+ destroyer = &destroyCXXObject;
+ break;
+ case QualType::DK_objc_strong_lifetime:
+ destroyer = &destroyARCStrongPrecise;
+ break;
+ case QualType::DK_objc_weak_lifetime:
+ destroyer = &destroyARCWeak;
+ break;
+ }
+ return *destroyer;
+}
+
+/// pushDestroy - Push the standard destructor for the given type.
+void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
+ llvm::Value *addr, QualType type) {
+ assert(dtorKind && "cannot push destructor for trivial type");
+
+ CleanupKind cleanupKind = getCleanupKind(dtorKind);
+ pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
+ cleanupKind & EHCleanup);
+}
+
+void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, llvm::Value *addr,
+ QualType type, Destroyer &destroyer,
+ bool useEHCleanupForArray) {
+ pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
+ destroyer, useEHCleanupForArray);
+}
+
+/// emitDestroy - Immediately perform the destruction of the given
+/// object.
+///
+/// \param addr - the address of the object; a type*
+/// \param type - the type of the object; if an array type, all
+/// objects are destroyed in reverse order
+/// \param destroyer - the function to call to destroy individual
+/// elements
+/// \param useEHCleanupForArray - whether an EH cleanup should be
+/// used when destroying array elements, in case one of the
+/// destructions throws an exception
+void CodeGenFunction::emitDestroy(llvm::Value *addr, QualType type,
+ Destroyer &destroyer,
+ bool useEHCleanupForArray) {
+ const ArrayType *arrayType = getContext().getAsArrayType(type);
+ if (!arrayType)
+ return destroyer(*this, addr, type);
+
+ llvm::Value *begin = addr;
+ llvm::Value *length = emitArrayLength(arrayType, type, begin);
+
+ // Normally we have to check whether the array is zero-length.
+ bool checkZeroLength = true;
+
+ // But if the array length is constant, we can suppress that.
+ if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
+ // ...and if it's constant zero, we can just skip the entire thing.
+ if (constLength->isZero()) return;
+ checkZeroLength = false;
+ }
+
+ llvm::Value *end = Builder.CreateInBoundsGEP(begin, length);
+ emitArrayDestroy(begin, end, type, destroyer,
+ checkZeroLength, useEHCleanupForArray);
+}
+
+/// emitArrayDestroy - Destroys all the elements of the given array,
+/// beginning from last to first. The array cannot be zero-length.
+///
+/// \param begin - a type* denoting the first element of the array
+/// \param end - a type* denoting one past the end of the array
+/// \param type - the element type of the array
+/// \param destroyer - the function to call to destroy elements
+/// \param useEHCleanup - whether to push an EH cleanup to destroy
+/// the remaining elements in case the destruction of a single
+/// element throws
+void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
+ llvm::Value *end,
+ QualType type,
+ Destroyer &destroyer,
+ bool checkZeroLength,
+ bool useEHCleanup) {
+ assert(!type->isArrayType());
+
+ // The basic structure here is a do-while loop, because we don't
+ // need to check for the zero-element case.
+ llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
+ llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");
+
+ if (checkZeroLength) {
+ llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
+ "arraydestroy.isempty");
+ Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
+ }
+
+ // Enter the loop body, making that address the current address.
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ EmitBlock(bodyBB);
+ llvm::PHINode *elementPast =
+ Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
+ elementPast->addIncoming(end, entryBB);
+
+ // Shift the address back by one element.
+ llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
+ llvm::Value *element = Builder.CreateInBoundsGEP(elementPast, negativeOne,
+ "arraydestroy.element");
+
+ if (useEHCleanup)
+ pushRegularPartialArrayCleanup(begin, element, type, destroyer);
+
+ // Perform the actual destruction there.
+ destroyer(*this, element, type);
+
+ if (useEHCleanup)
+ PopCleanupBlock();
+
+ // Check whether we've reached the end.
+ llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
+ Builder.CreateCondBr(done, doneBB, bodyBB);
+ elementPast->addIncoming(element, Builder.GetInsertBlock());
+
+ // Done.
+ EmitBlock(doneBB);
+}
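As with the construction loop, the emitted structure is easiest to read as its C++-level analogue; the following sketch (not from the patch) mirrors the basic-block labels used above.

// Rough C++ analogue of the emitted "arraydestroy" loop; illustrative only.
template <class T>
void destroyArray(T *begin, T *end, bool checkZeroLength) {
  if (checkZeroLength && begin == end)   // "arraydestroy.isempty"
    return;
  T *elementPast = end;                  // the "arraydestroy.elementPast" phi
  do {
    T *element = elementPast - 1;        // step back one element
    element->~T();                       // the per-element destroyer
    elementPast = element;
  } while (elementPast != begin);        // "arraydestroy.done"
}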
+
+/// Perform partial array destruction as if in an EH cleanup. Unlike
+/// emitArrayDestroy, the element type here may still be an array type.
+static void emitPartialArrayDestroy(CodeGenFunction &CGF,
+ llvm::Value *begin, llvm::Value *end,
+ QualType type,
+ CodeGenFunction::Destroyer &destroyer) {
+ // If the element type is itself an array, drill down.
+ unsigned arrayDepth = 0;
+ while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
+ // VLAs don't require a GEP index to walk into.
+ if (!isa<VariableArrayType>(arrayType))
+ arrayDepth++;
+ type = arrayType->getElementType();
+ }
+
+ if (arrayDepth) {
+ llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, arrayDepth+1);
+
+ llvm::SmallVector<llvm::Value*,4> gepIndices(arrayDepth, zero);
+ begin = CGF.Builder.CreateInBoundsGEP(begin, gepIndices.begin(),
+ gepIndices.end(), "pad.arraybegin");
+ end = CGF.Builder.CreateInBoundsGEP(end, gepIndices.begin(),
+ gepIndices.end(), "pad.arrayend");
+ }
+
+ // Destroy the array. We don't ever need an EH cleanup because we
+ // assume that we're in an EH cleanup ourselves, so a throwing
+ // destructor causes an immediate terminate.
+ CGF.emitArrayDestroy(begin, end, type, destroyer,
+ /*checkZeroLength*/ true, /*useEHCleanup*/ false);
+}
+
+namespace {
+ /// RegularPartialArrayDestroy - a cleanup which performs a partial
+ /// array destroy where the end pointer is regularly determined and
+ /// does not need to be loaded from a local.
+ class RegularPartialArrayDestroy : public EHScopeStack::Cleanup {
+ llvm::Value *ArrayBegin;
+ llvm::Value *ArrayEnd;
+ QualType ElementType;
+ CodeGenFunction::Destroyer &Destroyer;
+ public:
+ RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
+ QualType elementType,
+ CodeGenFunction::Destroyer *destroyer)
+ : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
+ ElementType(elementType), Destroyer(*destroyer) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
+ ElementType, Destroyer);
+ }
+ };
+
+ /// IrregularPartialArrayDestroy - a cleanup which performs a
+ /// partial array destroy where the end pointer is irregularly
+ /// determined and must be loaded from a local.
+ class IrregularPartialArrayDestroy : public EHScopeStack::Cleanup {
+ llvm::Value *ArrayBegin;
+ llvm::Value *ArrayEndPointer;
+ QualType ElementType;
+ CodeGenFunction::Destroyer &Destroyer;
+ public:
+ IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
+ llvm::Value *arrayEndPointer,
+ QualType elementType,
+ CodeGenFunction::Destroyer *destroyer)
+ : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
+ ElementType(elementType), Destroyer(*destroyer) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
+ emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
+ ElementType, Destroyer);
+ }
+ };
+}
+
+/// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
+/// already-constructed elements of the given array. The cleanup
+/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
+///
+/// \param elementType - the immediate element type of the array;
+/// possibly still an array type
+/// \param arrayBegin - a value of type elementType*, the start of the array
+/// \param arrayEndPointer - a pointer from which the current end of the
+/// array (an elementType*) is loaded when the cleanup runs
+/// \param destroyer - the function to call to destroy elements
+void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
+ llvm::Value *arrayEndPointer,
+ QualType elementType,
+ Destroyer &destroyer) {
+ pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
+ arrayBegin, arrayEndPointer,
+ elementType, &destroyer);
+}
+
+/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
+/// already-constructed elements of the given array. The cleanup
+/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
+///
+/// \param elementType - the immediate element type of the array;
+/// possibly still an array type
+/// \param arrayBegin - a value of type elementType*, the start of the array
+/// \param arrayEnd - a value of type elementType*, one past the last
+/// already-constructed element
+/// \param destroyer - the function to call to destroy elements
+void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
+ llvm::Value *arrayEnd,
+ QualType elementType,
+ Destroyer &destroyer) {
+ pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
+ arrayBegin, arrayEnd,
+ elementType, &destroyer);
+}
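A hedged usage sketch of the regular variant, written as if inside a CodeGenFunction member that constructs elements front to back (compare the EmitCXXAggrConstructorCall loop earlier in this patch); 'begin', 'cur', 'elementType', and the trivial-destructor test are illustrative names, not real API:

// Sketch only: guard each potentially-throwing element construction.
// 'elementHasTrivialDtor' is an assumed flag, not a real API.
if (getLangOptions().Exceptions && !elementHasTrivialDtor) {
  Destroyer *destroyer = &getDestroyer(QualType::DK_cxx_destructor);
  pushRegularPartialArrayCleanup(begin, cur, elementType, *destroyer);
}
// ... emit the construction of *cur; if it throws, the cleanup destroys
// the already-constructed elements in [begin, cur) in reverse order.
// The enclosing RunCleanupsScope (or an explicit PopCleanupBlock) retires
// the cleanup once the element is fully constructed.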
+
+namespace {
+ /// A cleanup to perform a release of an object at the end of a
+ /// function. This is used to balance out the incoming +1 of a
+ /// ns_consumed argument when we can't reasonably do that just by
+ /// not doing the initial retain for a __block argument.
+ struct ConsumeARCParameter : EHScopeStack::Cleanup {
+ ConsumeARCParameter(llvm::Value *param) : Param(param) {}
+
+ llvm::Value *Param;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitARCRelease(Param, /*precise*/ false);
+ }
+ };
+}
+
/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
@@ -883,10 +1417,56 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
// Otherwise, create a temporary to hold the value.
DeclPtr = CreateMemTemp(Ty, D.getName() + ".addr");
+ bool doStore = true;
+
+ Qualifiers qs = Ty.getQualifiers();
+
+ if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
+ // We honor __attribute__((ns_consumed)) for types with lifetime.
+ // For __strong, it's handled by just skipping the initial retain;
+ // otherwise we have to balance out the initial +1 with an extra
+ // cleanup to do the release at the end of the function.
+ bool isConsumed = D.hasAttr<NSConsumedAttr>();
+
+ // 'self' is always formally __strong, but if this is not an
+ // init method then we don't want to retain it.
+ if (D.isARCPseudoStrong()) {
+ const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CurCodeDecl);
+ assert(&D == method->getSelfDecl());
+ assert(lt == Qualifiers::OCL_Strong);
+ assert(qs.hasConst());
+ assert(method->getMethodFamily() != OMF_init);
+ (void) method;
+ lt = Qualifiers::OCL_ExplicitNone;
+ }
+
+ if (lt == Qualifiers::OCL_Strong) {
+ if (!isConsumed)
+ // Don't use objc_retainBlock for block pointers, because we
+ // don't want to Block_copy something just because we got it
+ // as a parameter.
+ Arg = EmitARCRetainNonBlock(Arg);
+ } else {
+ // Push the cleanup for a consumed parameter.
+ if (isConsumed)
+ EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), Arg);
+
+ if (lt == Qualifiers::OCL_Weak) {
+ EmitARCInitWeak(DeclPtr, Arg);
+ doStore = false; // The weak init is a store, no need to do two
+ }
+ }
+
+ // Enter the cleanup scope.
+ EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
+ }
+
// Store the initial value into the alloca.
- EmitStoreOfScalar(Arg, DeclPtr, Ty.isVolatileQualified(),
- getContext().getDeclAlign(&D).getQuantity(), Ty,
- CGM.getTBAAInfo(Ty));
+ if (doStore) {
+ LValue lv = MakeAddrLValue(DeclPtr, Ty,
+ getContext().getDeclAlign(&D).getQuantity());
+ EmitStoreOfScalar(Arg, lv);
+ }
}
llvm::Value *&DMEntry = LocalDeclMap[&D];
@@ -894,8 +1474,6 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
DMEntry = DeclPtr;
// Emit debug info for param declaration.
- if (CGDebugInfo *DI = getDebugInfo()) {
- DI->setLocation(D.getLocation());
+ if (CGDebugInfo *DI = getDebugInfo())
DI->EmitDeclareOfArgVariable(&D, DeclPtr, ArgNo, Builder);
- }
}
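Summarizing the parameter handling added above (an illustrative condensation of the code, not additional behavior):

// __strong, not ns_consumed : retain on entry (objc_retain, never
//                             objc_retainBlock); released by the usual
//                             end-of-scope lifetime cleanup
// __strong, ns_consumed     : skip the entry retain; the caller's +1 is
//                             balanced by that same end-of-scope release
// __weak                    : initialize with the weak-init entry point
//                             instead of a plain store (doStore = false)
// __unsafe_unretained /
// __autoreleasing           : plain store; if ns_consumed, push
//                             ConsumeARCParameter to release at exit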
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 178badd..0ae6a3d 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -27,68 +27,76 @@ static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
"Should not call EmitDeclInit on a reference!");
ASTContext &Context = CGF.getContext();
-
- const Expr *Init = D.getInit();
- QualType T = D.getType();
- bool isVolatile = Context.getCanonicalType(T).isVolatileQualified();
- unsigned Alignment = Context.getDeclAlign(&D).getQuantity();
- if (!CGF.hasAggregateLLVMType(T)) {
- llvm::Value *V = CGF.EmitScalarExpr(Init);
+ unsigned alignment = Context.getDeclAlign(&D).getQuantity();
+ QualType type = D.getType();
+ LValue lv = CGF.MakeAddrLValue(DeclPtr, type, alignment);
+
+ const Expr *Init = D.getInit();
+ if (!CGF.hasAggregateLLVMType(type)) {
CodeGenModule &CGM = CGF.CGM;
- Qualifiers::GC GCAttr = CGM.getContext().getObjCGCAttrKind(T);
- if (GCAttr == Qualifiers::Strong)
- CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, V, DeclPtr,
- D.isThreadSpecified());
- else if (GCAttr == Qualifiers::Weak)
- CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, V, DeclPtr);
+ if (lv.isObjCStrong())
+ CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
+ DeclPtr, D.isThreadSpecified());
+ else if (lv.isObjCWeak())
+ CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
+ DeclPtr);
else
- CGF.EmitStoreOfScalar(V, DeclPtr, isVolatile, Alignment, T);
- } else if (T->isAnyComplexType()) {
- CGF.EmitComplexExprIntoAddr(Init, DeclPtr, isVolatile);
+ CGF.EmitScalarInit(Init, &D, lv, false);
+ } else if (type->isAnyComplexType()) {
+ CGF.EmitComplexExprIntoAddr(Init, DeclPtr, lv.isVolatile());
} else {
- CGF.EmitAggExpr(Init, AggValueSlot::forAddr(DeclPtr, isVolatile, true));
+ CGF.EmitAggExpr(Init, AggValueSlot::forLValue(lv, true));
}
}
/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
- llvm::Constant *DeclPtr) {
+ llvm::Constant *addr) {
CodeGenModule &CGM = CGF.CGM;
- ASTContext &Context = CGF.getContext();
-
- QualType T = D.getType();
-
- // Drill down past array types.
- const ConstantArrayType *Array = Context.getAsConstantArrayType(T);
- if (Array)
- T = Context.getBaseElementType(Array);
+
+ // FIXME: __attribute__((cleanup)) ?
- /// If that's not a record, we're done.
- /// FIXME: __attribute__((cleanup)) ?
- const RecordType *RT = T->getAs<RecordType>();
- if (!RT)
+ QualType type = D.getType();
+ QualType::DestructionKind dtorKind = type.isDestructedType();
+
+ switch (dtorKind) {
+ case QualType::DK_none:
return;
-
- CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- if (RD->hasTrivialDestructor())
+
+ case QualType::DK_cxx_destructor:
+ break;
+
+ case QualType::DK_objc_strong_lifetime:
+ case QualType::DK_objc_weak_lifetime:
+ // We don't care about releasing objects during process teardown.
return;
-
- CXXDestructorDecl *Dtor = RD->getDestructor();
-
- llvm::Constant *DtorFn;
- if (Array) {
- DtorFn =
- CodeGenFunction(CGM).GenerateCXXAggrDestructorHelper(Dtor, Array,
- DeclPtr);
- const llvm::Type *Int8PtrTy =
- llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
- DeclPtr = llvm::Constant::getNullValue(Int8PtrTy);
- } else
- DtorFn = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete);
-
- CGF.EmitCXXGlobalDtorRegistration(DtorFn, DeclPtr);
+ }
+
+ llvm::Constant *function;
+ llvm::Constant *argument;
+
+ // Special-case non-array C++ destructors, where there's a function
+ // with the right signature that we can just call.
+ const CXXRecordDecl *record = 0;
+ if (dtorKind == QualType::DK_cxx_destructor &&
+ (record = type->getAsCXXRecordDecl())) {
+ assert(!record->hasTrivialDestructor());
+ CXXDestructorDecl *dtor = record->getDestructor();
+
+ function = CGM.GetAddrOfCXXDestructor(dtor, Dtor_Complete);
+ argument = addr;
+
+ // Otherwise, the standard logic requires a helper function.
+ } else {
+ function = CodeGenFunction(CGM).generateDestroyHelper(addr, type,
+ CGF.getDestroyer(dtorKind),
+ CGF.needsEHCleanup(dtorKind));
+ argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
+ }
+
+ CGF.EmitCXXGlobalDtorRegistration(function, argument);
}
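In effect the registration emitted here takes one of two shapes, shown below as pseudo-source for illustration (the helper name matches the one created by generateDestroyHelper later in this file; the third argument is whatever handle EmitCXXGlobalDtorRegistration supplies):

// Non-array C++ object with a non-trivial destructor: register the
// destructor itself, passing the object's address.
//   __cxa_atexit((void (*)(void *)) &T::~T, &global, handle);
//
// Everything else that still needs destruction: register a generated
// helper and pass it a null pointer.
//   __cxa_atexit(&__cxx_global_array_dtor, (void *) 0, handle);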
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
@@ -118,12 +126,13 @@ CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
}
// Get the destructor function type
- const llvm::Type *DtorFnTy =
+ llvm::Type *ArgTys[] = { Int8PtrTy };
+ llvm::Type *DtorFnTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
- Int8PtrTy, false);
+ ArgTys, false);
DtorFnTy = llvm::PointerType::getUnqual(DtorFnTy);
- const llvm::Type *Params[] = { DtorFnTy, Int8PtrTy, Int8PtrTy };
+ llvm::Type *Params[] = { DtorFnTy, Int8PtrTy, Int8PtrTy };
// Get the __cxa_atexit function type
// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
@@ -140,7 +149,7 @@ CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
llvm::Value *Args[3] = { llvm::ConstantExpr::getBitCast(DtorFn, DtorFnTy),
llvm::ConstantExpr::getBitCast(DeclPtr, Int8PtrTy),
llvm::ConstantExpr::getBitCast(Handle, Int8PtrTy) };
- Builder.CreateCall(AtExitFn, &Args[0], llvm::array_endof(Args));
+ Builder.CreateCall(AtExitFn, Args);
}
void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
@@ -270,12 +279,11 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
getTypes().getNullaryFunctionInfo(),
FunctionArgList(), SourceLocation());
- // Use guarded initialization if the global variable is weak due to
- // being a class template's static data member. These will always
- // have weak_odr linkage.
- if (Addr->getLinkage() == llvm::GlobalValue::WeakODRLinkage &&
- D->isStaticDataMember() &&
- D->getInstantiatedFromStaticDataMember()) {
+ // Use guarded initialization if the global variable is weak. This
+ // occurs for, e.g., instantiated static data members and
+ // definitions explicitly marked weak.
+ if (Addr->getLinkage() == llvm::GlobalValue::WeakODRLinkage ||
+ Addr->getLinkage() == llvm::GlobalValue::WeakAnyLinkage) {
EmitCXXGuardedInit(*D, Addr);
} else {
EmitCXXGlobalVarDeclInit(*D, Addr);
@@ -291,10 +299,21 @@ void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
getTypes().getNullaryFunctionInfo(),
FunctionArgList(), SourceLocation());
+ RunCleanupsScope Scope(*this);
+
+ // When building in Objective-C++ ARC mode, create an autorelease pool
+ // around the global initializers.
+ if (getLangOptions().ObjCAutoRefCount && getLangOptions().CPlusPlus) {
+ llvm::Value *token = EmitObjCAutoreleasePoolPush();
+ EmitObjCAutoreleasePoolCleanup(token);
+ }
+
for (unsigned i = 0; i != NumDecls; ++i)
if (Decls[i])
- Builder.CreateCall(Decls[i]);
+ Builder.CreateCall(Decls[i]);
+ Scope.ForceCleanup();
+
FinishFunction();
}
@@ -318,13 +337,13 @@ void CodeGenFunction::GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
FinishFunction();
}
-/// GenerateCXXAggrDestructorHelper - Generates a helper function which when
-/// invoked, calls the default destructor on array elements in reverse order of
-/// construction.
+/// generateDestroyHelper - Generates a helper function which, when
+/// invoked, destroys the given object.
llvm::Function *
-CodeGenFunction::GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
- const ArrayType *Array,
- llvm::Value *This) {
+CodeGenFunction::generateDestroyHelper(llvm::Constant *addr,
+ QualType type,
+ Destroyer &destroyer,
+ bool useEHCleanupForArray) {
FunctionArgList args;
ImplicitParamDecl dst(0, SourceLocation(), 0, getContext().VoidPtrTy);
args.push_back(&dst);
@@ -333,19 +352,16 @@ CodeGenFunction::GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
CGM.getTypes().getFunctionInfo(getContext().VoidTy, args,
FunctionType::ExtInfo());
const llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false);
- llvm::Function *Fn =
+ llvm::Function *fn =
CreateGlobalInitOrDestructFunction(CGM, FTy, "__cxx_global_array_dtor");
- StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FI, args,
+ StartFunction(GlobalDecl(), getContext().VoidTy, fn, FI, args,
SourceLocation());
- QualType BaseElementTy = getContext().getBaseElementType(Array);
- const llvm::Type *BasePtr = ConvertType(BaseElementTy)->getPointerTo();
- llvm::Value *BaseAddrPtr = Builder.CreateBitCast(This, BasePtr);
-
- EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
+ emitDestroy(addr, type, destroyer, useEHCleanupForArray);
FinishFunction();
- return Fn;
+ return fn;
}
+
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index e8ad6da..418bea6 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -29,10 +29,9 @@ using namespace CodeGen;
static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) {
// void *__cxa_allocate_exception(size_t thrown_size);
- const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
+ llvm::Type *ArgTys[] = { CGF.SizeTy };
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getInt8PtrTy(CGF.getLLVMContext()),
- SizeTy, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGF.Int8PtrTy, ArgTys, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
}
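The same mechanical rewrite repeats through the rest of this file: instead of passing a single parameter type, callers build a small array, which converts implicitly to the array-of-types form that llvm::FunctionType::get accepts. The shape is always:

llvm::Type *ArgTys[] = { CGF.Int8PtrTy };          // parameter types
const llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGF.VoidTy, ArgTys,    // return type + params
                            /*IsVarArgs=*/false);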
@@ -40,10 +39,9 @@ static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) {
static llvm::Constant *getFreeExceptionFn(CodeGenFunction &CGF) {
// void __cxa_free_exception(void *thrown_exception);
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *ArgTys[] = { CGF.Int8PtrTy };
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
- Int8PtrTy, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGF.VoidTy, ArgTys, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_free_exception");
}
@@ -52,11 +50,9 @@ static llvm::Constant *getThrowFn(CodeGenFunction &CGF) {
// void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
// void (*dest) (void *));
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- const llvm::Type *Args[3] = { Int8PtrTy, Int8PtrTy, Int8PtrTy };
+ llvm::Type *Args[3] = { CGF.Int8PtrTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
- Args, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGF.VoidTy, Args, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
}
@@ -65,8 +61,7 @@ static llvm::Constant *getReThrowFn(CodeGenFunction &CGF) {
// void __cxa_rethrow();
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
- /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGF.VoidTy, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
}
@@ -74,9 +69,9 @@ static llvm::Constant *getReThrowFn(CodeGenFunction &CGF) {
static llvm::Constant *getGetExceptionPtrFn(CodeGenFunction &CGF) {
// void *__cxa_get_exception_ptr(void*);
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *ArgTys[] = { CGF.Int8PtrTy };
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(Int8PtrTy, Int8PtrTy, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGF.Int8PtrTy, ArgTys, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
}
@@ -84,9 +79,9 @@ static llvm::Constant *getGetExceptionPtrFn(CodeGenFunction &CGF) {
static llvm::Constant *getBeginCatchFn(CodeGenFunction &CGF) {
// void *__cxa_begin_catch(void*);
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *ArgTys[] = { CGF.Int8PtrTy };
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(Int8PtrTy, Int8PtrTy, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGF.Int8PtrTy, ArgTys, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
}
@@ -95,8 +90,7 @@ static llvm::Constant *getEndCatchFn(CodeGenFunction &CGF) {
// void __cxa_end_catch();
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
- /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGF.VoidTy, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
}
@@ -104,17 +98,17 @@ static llvm::Constant *getEndCatchFn(CodeGenFunction &CGF) {
static llvm::Constant *getUnexpectedFn(CodeGenFunction &CGF) {
  // void __cxa_call_unexpected(void *thrown_exception);
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *ArgTys[] = { CGF.Int8PtrTy };
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
- Int8PtrTy, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGF.VoidTy, ArgTys, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_call_unexpected");
}
llvm::Constant *CodeGenFunction::getUnwindResumeFn() {
+ llvm::Type *ArgTys[] = { Int8PtrTy };
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(VoidTy, Int8PtrTy, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(VoidTy, ArgTys, /*IsVarArgs=*/false);
if (CGM.getLangOptions().SjLjExceptions)
return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume");
@@ -122,8 +116,9 @@ llvm::Constant *CodeGenFunction::getUnwindResumeFn() {
}
llvm::Constant *CodeGenFunction::getUnwindResumeOrRethrowFn() {
+ llvm::Type *ArgTys[] = { Int8PtrTy };
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(VoidTy, Int8PtrTy, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(VoidTy, ArgTys, /*IsVarArgs=*/false);
if (CGM.getLangOptions().SjLjExceptions)
return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume_or_Rethrow");
@@ -134,20 +129,26 @@ static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
// void __terminate();
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
- /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGF.VoidTy, /*IsVarArgs=*/false);
- return CGF.CGM.CreateRuntimeFunction(FTy,
- CGF.CGM.getLangOptions().CPlusPlus ? "_ZSt9terminatev" : "abort");
+ llvm::StringRef name;
+
+ // In C++, use std::terminate().
+ if (CGF.getLangOptions().CPlusPlus)
+ name = "_ZSt9terminatev"; // FIXME: mangling!
+ else if (CGF.getLangOptions().ObjC1 &&
+ CGF.CGM.getCodeGenOpts().ObjCRuntimeHasTerminate)
+ name = "objc_terminate";
+ else
+ name = "abort";
+ return CGF.CGM.CreateRuntimeFunction(FTy, name);
}
static llvm::Constant *getCatchallRethrowFn(CodeGenFunction &CGF,
llvm::StringRef Name) {
- const llvm::Type *Int8PtrTy =
- llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
- const llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, Int8PtrTy,
- /*IsVarArgs=*/false);
+ llvm::Type *ArgTys[] = { CGF.Int8PtrTy };
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.VoidTy, ArgTys, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, Name);
}
@@ -322,9 +323,10 @@ static llvm::Constant *getCleanupValue(CodeGenFunction &CGF) {
namespace {
/// A cleanup to free the exception object if its initialization
/// throws.
- struct FreeException {
- static void Emit(CodeGenFunction &CGF, bool forEH,
- llvm::Value *exn) {
+ struct FreeException : EHScopeStack::Cleanup {
+ llvm::Value *exn;
+ FreeException(llvm::Value *exn) : exn(exn) {}
+ void Emit(CodeGenFunction &CGF, Flags flags) {
CGF.Builder.CreateCall(getFreeExceptionFn(CGF), exn)
->setDoesNotThrow();
}
@@ -354,7 +356,8 @@ static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *e,
// evaluated but before the exception is caught. But the best way
// to handle that is to teach EmitAggExpr to do the final copy
// differently if it can't be elided.
- CGF.EmitAnyExprToMem(e, typedAddr, /*Volatile*/ false, /*IsInit*/ true);
+ CGF.EmitAnyExprToMem(e, typedAddr, e->getType().getQualifiers(),
+ /*IsInit*/ true);
// Deactivate the cleanup block.
CGF.DeactivateCleanupBlock(cleanup);
@@ -407,7 +410,6 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
EmitAnyExprToExn(*this, E->getSubExpr(), ExceptionPtr);
// Now throw the exception.
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
/*ForEH=*/true);
@@ -786,7 +788,7 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// Tell the backend how to generate the landing pad.
llvm::CallInst *Selection =
Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_selector),
- EHSelector.begin(), EHSelector.end(), "eh.selector");
+ EHSelector, "eh.selector");
Selection->setDoesNotThrow();
// Save the selector value in mandatory-cleanup mode.
@@ -920,13 +922,13 @@ namespace {
CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
bool MightThrow;
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
if (!MightThrow) {
CGF.Builder.CreateCall(getEndCatchFn(CGF))->setDoesNotThrow();
return;
}
- CGF.EmitCallOrInvoke(getEndCatchFn(CGF), 0, 0);
+ CGF.EmitCallOrInvoke(getEndCatchFn(CGF));
}
};
}
@@ -1084,7 +1086,8 @@ static void InitCatchParam(CodeGenFunction &CGF,
CGF.EHStack.pushTerminate();
// Perform the copy construction.
- CGF.EmitAggExpr(copyExpr, AggValueSlot::forAddr(ParamAddr, false, false));
+ CGF.EmitAggExpr(copyExpr, AggValueSlot::forAddr(ParamAddr, Qualifiers(),
+ false));
// Leave the terminate scope.
CGF.EHStack.popTerminate();
@@ -1137,8 +1140,8 @@ static void BeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *S) {
namespace {
struct CallRethrow : EHScopeStack::Cleanup {
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
- CGF.EmitCallOrInvoke(getReThrowFn(CGF), 0, 0);
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitCallOrInvoke(getReThrowFn(CGF));
}
};
}
@@ -1209,7 +1212,7 @@ namespace {
CallEndCatchForFinally(llvm::Value *ForEHVar, llvm::Value *EndCatchFn)
: ForEHVar(ForEHVar), EndCatchFn(EndCatchFn) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
llvm::BasicBlock *EndCatchBB = CGF.createBasicBlock("finally.endcatch");
llvm::BasicBlock *CleanupContBB =
CGF.createBasicBlock("finally.cleanup.cont");
@@ -1218,7 +1221,7 @@ namespace {
CGF.Builder.CreateLoad(ForEHVar, "finally.endcatch");
CGF.Builder.CreateCondBr(ShouldEndCatch, EndCatchBB, CleanupContBB);
CGF.EmitBlock(EndCatchBB);
- CGF.EmitCallOrInvoke(EndCatchFn, 0, 0); // catch-all, so might throw
+ CGF.EmitCallOrInvoke(EndCatchFn); // catch-all, so might throw
CGF.EmitBlock(CleanupContBB);
}
};
@@ -1236,7 +1239,7 @@ namespace {
: Body(Body), ForEHVar(ForEHVar), EndCatchFn(EndCatchFn),
RethrowFn(RethrowFn), SavedExnVar(SavedExnVar) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
// Enter a cleanup to call the end-catch function if one was provided.
if (EndCatchFn)
CGF.EHStack.pushCleanup<CallEndCatchForFinally>(NormalAndEHCleanup,
@@ -1263,10 +1266,9 @@ namespace {
CGF.EmitBlock(RethrowBB);
if (SavedExnVar) {
- llvm::Value *Args[] = { CGF.Builder.CreateLoad(SavedExnVar) };
- CGF.EmitCallOrInvoke(RethrowFn, Args, Args+1);
+ CGF.EmitCallOrInvoke(RethrowFn, CGF.Builder.CreateLoad(SavedExnVar));
} else {
- CGF.EmitCallOrInvoke(RethrowFn, 0, 0);
+ CGF.EmitCallOrInvoke(RethrowFn);
}
CGF.Builder.CreateUnreachable();
@@ -1296,14 +1298,16 @@ namespace {
/// Enters a finally block for an implementation using zero-cost
/// exceptions. This is mostly general, but hard-codes some
/// language/ABI-specific behavior in the catch-all sections.
-CodeGenFunction::FinallyInfo
-CodeGenFunction::EnterFinallyBlock(const Stmt *Body,
- llvm::Constant *BeginCatchFn,
- llvm::Constant *EndCatchFn,
- llvm::Constant *RethrowFn) {
- assert((BeginCatchFn != 0) == (EndCatchFn != 0) &&
+void CodeGenFunction::FinallyInfo::enter(CodeGenFunction &CGF,
+ const Stmt *body,
+ llvm::Constant *beginCatchFn,
+ llvm::Constant *endCatchFn,
+ llvm::Constant *rethrowFn) {
+ assert((beginCatchFn != 0) == (endCatchFn != 0) &&
"begin/end catch functions not paired");
- assert(RethrowFn && "rethrow function is required");
+ assert(rethrowFn && "rethrow function is required");
+
+ BeginCatchFn = beginCatchFn;
// The rethrow function has one of the following two types:
// void (*)()
@@ -1311,13 +1315,12 @@ CodeGenFunction::EnterFinallyBlock(const Stmt *Body,
// In the latter case we need to pass it the exception object.
// But we can't use the exception slot because the @finally might
// have a landing pad (which would overwrite the exception slot).
- const llvm::FunctionType *RethrowFnTy =
+ const llvm::FunctionType *rethrowFnTy =
cast<llvm::FunctionType>(
- cast<llvm::PointerType>(RethrowFn->getType())
- ->getElementType());
- llvm::Value *SavedExnVar = 0;
- if (RethrowFnTy->getNumParams())
- SavedExnVar = CreateTempAlloca(Builder.getInt8PtrTy(), "finally.exn");
+ cast<llvm::PointerType>(rethrowFn->getType())->getElementType());
+ SavedExnVar = 0;
+ if (rethrowFnTy->getNumParams())
+ SavedExnVar = CGF.CreateTempAlloca(CGF.Int8PtrTy, "finally.exn");
// A finally block is a statement which must be executed on any edge
// out of a given scope. Unlike a cleanup, the finally block may
@@ -1331,67 +1334,64 @@ CodeGenFunction::EnterFinallyBlock(const Stmt *Body,
// The finally block itself is generated in the context of a cleanup
// which conditionally leaves the catch-all.
- FinallyInfo Info;
-
// Jump destination for performing the finally block on an exception
// edge. We'll never actually reach this block, so unreachable is
// fine.
- JumpDest RethrowDest = getJumpDestInCurrentScope(getUnreachableBlock());
+ RethrowDest = CGF.getJumpDestInCurrentScope(CGF.getUnreachableBlock());
// Whether the finally block is being executed for EH purposes.
- llvm::AllocaInst *ForEHVar = CreateTempAlloca(Builder.getInt1Ty(),
- "finally.for-eh");
- InitTempAlloca(ForEHVar, llvm::ConstantInt::getFalse(getLLVMContext()));
+ ForEHVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "finally.for-eh");
+ CGF.Builder.CreateStore(CGF.Builder.getFalse(), ForEHVar);
// Enter a normal cleanup which will perform the @finally block.
- EHStack.pushCleanup<PerformFinally>(NormalCleanup, Body,
- ForEHVar, EndCatchFn,
- RethrowFn, SavedExnVar);
+ CGF.EHStack.pushCleanup<PerformFinally>(NormalCleanup, body,
+ ForEHVar, endCatchFn,
+ rethrowFn, SavedExnVar);
// Enter a catch-all scope.
- llvm::BasicBlock *CatchAllBB = createBasicBlock("finally.catchall");
- CGBuilderTy::InsertPoint SavedIP = Builder.saveIP();
- Builder.SetInsertPoint(CatchAllBB);
-
- // If there's a begin-catch function, call it.
- if (BeginCatchFn) {
- Builder.CreateCall(BeginCatchFn, Builder.CreateLoad(getExceptionSlot()))
- ->setDoesNotThrow();
- }
-
- // If we need to remember the exception pointer to rethrow later, do so.
- if (SavedExnVar) {
- llvm::Value *SavedExn = Builder.CreateLoad(getExceptionSlot());
- Builder.CreateStore(SavedExn, SavedExnVar);
- }
+ llvm::BasicBlock *catchBB = CGF.createBasicBlock("finally.catchall");
+ EHCatchScope *catchScope = CGF.EHStack.pushCatch(1);
+ catchScope->setCatchAllHandler(0, catchBB);
+}
- // Tell the finally block that we're in EH.
- Builder.CreateStore(llvm::ConstantInt::getTrue(getLLVMContext()), ForEHVar);
+void CodeGenFunction::FinallyInfo::exit(CodeGenFunction &CGF) {
+ // Leave the finally catch-all.
+ EHCatchScope &catchScope = cast<EHCatchScope>(*CGF.EHStack.begin());
+ llvm::BasicBlock *catchBB = catchScope.getHandler(0).Block;
+ CGF.EHStack.popCatch();
- // Thread a jump through the finally cleanup.
- EmitBranchThroughCleanup(RethrowDest);
+ // If there are any references to the catch-all block, emit it.
+ if (catchBB->use_empty()) {
+ delete catchBB;
+ } else {
+ CGBuilderTy::InsertPoint savedIP = CGF.Builder.saveAndClearIP();
+ CGF.EmitBlock(catchBB);
- Builder.restoreIP(SavedIP);
+ llvm::Value *exn = 0;
- EHCatchScope *CatchScope = EHStack.pushCatch(1);
- CatchScope->setCatchAllHandler(0, CatchAllBB);
+ // If there's a begin-catch function, call it.
+ if (BeginCatchFn) {
+ exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot());
+ CGF.Builder.CreateCall(BeginCatchFn, exn)->setDoesNotThrow();
+ }
- return Info;
-}
+ // If we need to remember the exception pointer to rethrow later, do so.
+ if (SavedExnVar) {
+ if (!exn) exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot());
+ CGF.Builder.CreateStore(exn, SavedExnVar);
+ }
-void CodeGenFunction::ExitFinallyBlock(FinallyInfo &Info) {
- // Leave the finally catch-all.
- EHCatchScope &Catch = cast<EHCatchScope>(*EHStack.begin());
- llvm::BasicBlock *CatchAllBB = Catch.getHandler(0).Block;
- EHStack.popCatch();
+  // Tell the cleanups in the finally block that we're doing this for EH.
+ CGF.Builder.CreateStore(CGF.Builder.getTrue(), ForEHVar);
- // And leave the normal cleanup.
- PopCleanupBlock();
+ // Thread a jump through the finally cleanup.
+ CGF.EmitBranchThroughCleanup(RethrowDest);
- CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
- EmitBlock(CatchAllBB, true);
+ CGF.Builder.restoreIP(savedIP);
+ }
- Builder.restoreIP(SavedIP);
+ // Finally, leave the @finally cleanup.
+ CGF.PopCleanupBlock();
}
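A minimal sketch of the intended calling convention for the reworked finally support (names here are illustrative; the runtime functions are whatever the Objective-C runtime in use provides):

static void emitWithFinally(CodeGenFunction &CGF, const Stmt *finallyBody,
                            llvm::Constant *beginCatchFn,
                            llvm::Constant *endCatchFn,
                            llvm::Constant *rethrowFn) {
  CodeGenFunction::FinallyInfo finallyInfo;
  finallyInfo.enter(CGF, finallyBody, beginCatchFn, endCatchFn, rethrowFn);

  // ... emit the statements protected by the @finally here ...

  // Pops the catch-all and the normal cleanup pushed by enter().
  finallyInfo.exit(CGF);
}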
llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
@@ -1416,7 +1416,7 @@ llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
llvm::Value *Args[3] = { Exn, getOpaquePersonalityFn(CGM, Personality),
getCatchAllValue(*this) };
Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_selector),
- Args, Args+3, "eh.selector")
+ Args, "eh.selector")
->setDoesNotThrow();
llvm::CallInst *TerminateCall = Builder.CreateCall(getTerminateFn(*this));
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 2f6b55b..a7e8003 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -20,8 +20,8 @@
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
-#include "llvm/Intrinsics.h"
#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/Intrinsics.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;
@@ -131,16 +131,16 @@ RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
llvm::Value *Location,
- bool IsLocationVolatile,
+ Qualifiers Quals,
bool IsInit) {
- if (E->getType()->isComplexType())
- EmitComplexExprIntoAddr(E, Location, IsLocationVolatile);
+ if (E->getType()->isAnyComplexType())
+ EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
else if (hasAggregateLLVMType(E->getType()))
- EmitAggExpr(E, AggValueSlot::forAddr(Location, IsLocationVolatile, IsInit));
+ EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals, IsInit));
else {
RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
LValue LV = MakeAddrLValue(Location, E->getType());
- EmitStoreThroughLValue(RV, LV, E->getType());
+ EmitStoreThroughLValue(RV, LV);
}
}
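Call sites migrate accordingly; where the old boolean said "not volatile", the usual replacement passes the expression's own qualifier set, as the updated callers later in this patch do:

// Old form:
//   CGF.EmitAnyExprToMem(E, Loc, /*Volatile*/ false, /*IsInit*/ true);
// New form:
//   CGF.EmitAnyExprToMem(E, Loc, E->getType().getQualifiers(),
//                        /*IsInit*/ true);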
@@ -203,7 +203,24 @@ static llvm::Value *
EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
llvm::Value *&ReferenceTemporary,
const CXXDestructorDecl *&ReferenceTemporaryDtor,
+ QualType &ObjCARCReferenceLifetimeType,
const NamedDecl *InitializedDecl) {
+ // Look through expressions for materialized temporaries (for now).
+ if (const MaterializeTemporaryExpr *M
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ // Objective-C++ ARC:
+ // If we are binding a reference to a temporary that has ownership, we
+ // need to perform retain/release operations on the temporary.
+ if (CGF.getContext().getLangOptions().ObjCAutoRefCount &&
+ E->getType()->isObjCLifetimeType() &&
+ (E->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
+ E->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
+ E->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
+ ObjCARCReferenceLifetimeType = E->getType();
+
+ E = M->GetTemporaryExpr();
+ }
+
if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
E = DAE->getExpr();
@@ -213,6 +230,7 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
return EmitExprForReferenceBinding(CGF, TE->getSubExpr(),
ReferenceTemporary,
ReferenceTemporaryDtor,
+ ObjCARCReferenceLifetimeType,
InitializedDecl);
}
@@ -229,12 +247,69 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
RV = CGF.EmitLoadOfPropertyRefLValue(LV);
return RV.getScalarVal();
}
+
if (LV.isSimple())
return LV.getAddress();
// We have to load the lvalue.
- RV = CGF.EmitLoadOfLValue(LV, E->getType());
+ RV = CGF.EmitLoadOfLValue(LV);
} else {
+ if (!ObjCARCReferenceLifetimeType.isNull()) {
+ ReferenceTemporary = CreateReferenceTemporary(CGF,
+ ObjCARCReferenceLifetimeType,
+ InitializedDecl);
+
+
+ LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
+ ObjCARCReferenceLifetimeType);
+
+ CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
+ RefTempDst, false);
+
+ bool ExtendsLifeOfTemporary = false;
+ if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
+ if (Var->extendsLifetimeOfTemporary())
+ ExtendsLifeOfTemporary = true;
+ } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
+ ExtendsLifeOfTemporary = true;
+ }
+
+ if (!ExtendsLifeOfTemporary) {
+ // Since the lifetime of this temporary isn't going to be extended,
+ // we need to clean it up ourselves at the end of the full expression.
+ switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ assert(!ObjCARCReferenceLifetimeType->isArrayType());
+ CleanupKind cleanupKind = CGF.getARCCleanupKind();
+ CGF.pushDestroy(cleanupKind,
+ ReferenceTemporary,
+ ObjCARCReferenceLifetimeType,
+ CodeGenFunction::destroyARCStrongImprecise,
+ cleanupKind & EHCleanup);
+ break;
+ }
+
+ case Qualifiers::OCL_Weak:
+ assert(!ObjCARCReferenceLifetimeType->isArrayType());
+ CGF.pushDestroy(NormalAndEHCleanup,
+ ReferenceTemporary,
+ ObjCARCReferenceLifetimeType,
+ CodeGenFunction::destroyARCWeak,
+ /*useEHCleanupForArray*/ true);
+ break;
+ }
+
+ ObjCARCReferenceLifetimeType = QualType();
+ }
+
+ return ReferenceTemporary;
+ }
+
llvm::SmallVector<SubobjectAdjustment, 2> Adjustments;
while (true) {
E = E->IgnoreParens();
@@ -279,12 +354,10 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
!E->getType()->isAnyComplexType()) {
ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
InitializedDecl);
- AggSlot = AggValueSlot::forAddr(ReferenceTemporary, false,
+ AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Qualifiers(),
InitializedDecl != 0);
}
-
- RV = CGF.EmitAnyExpr(E, AggSlot);
-
+
if (InitializedDecl) {
// Get the destructor for the reference temporary.
if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
@@ -294,6 +367,8 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
}
}
+ RV = CGF.EmitAnyExpr(E, AggSlot);
+
// Check if need to perform derived-to-base casts and/or field accesses, to
// get from the temporary object we created (and, potentially, for which we
// extended the lifetime) to the subobject we're binding the reference to.
@@ -326,7 +401,7 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
LValue TempLV = CGF.MakeAddrLValue(Object,
Adjustment.Field->getType());
- CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV, T), TempLV, T);
+ CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
break;
}
@@ -361,26 +436,65 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
const NamedDecl *InitializedDecl) {
llvm::Value *ReferenceTemporary = 0;
const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
+ QualType ObjCARCReferenceLifetimeType;
llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
ReferenceTemporaryDtor,
+ ObjCARCReferenceLifetimeType,
InitializedDecl);
- if (!ReferenceTemporaryDtor)
+ if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
return RValue::get(Value);
// Make sure to call the destructor for the reference temporary.
- if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
- if (VD->hasGlobalStorage()) {
+ const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
+ if (VD && VD->hasGlobalStorage()) {
+ if (ReferenceTemporaryDtor) {
llvm::Constant *DtorFn =
CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
EmitCXXGlobalDtorRegistration(DtorFn,
cast<llvm::Constant>(ReferenceTemporary));
-
- return RValue::get(Value);
+ } else {
+ assert(!ObjCARCReferenceLifetimeType.isNull());
+ // Note: We intentionally do not register a global "destructor" to
+ // release the object.
}
+
+ return RValue::get(Value);
}
- PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
-
+ if (ReferenceTemporaryDtor)
+ PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
+ else {
+ switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ assert(0 && "Not a reference temporary that needs to be deallocated");
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ // Nothing to do.
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
+ CleanupKind cleanupKind = getARCCleanupKind();
+ // This local is a GCC and MSVC compiler workaround.
+ Destroyer *destroyer = precise ? &destroyARCStrongPrecise :
+ &destroyARCStrongImprecise;
+ pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
+ *destroyer, cleanupKind & EHCleanup);
+ break;
+ }
+
+ case Qualifiers::OCL_Weak: {
+ // This local is a GCC and MSVC compiler workaround.
+ Destroyer *destroyer = &destroyARCWeak;
+ // __weak objects always get EH cleanups; otherwise, exceptions
+ // could cause really nasty crashes instead of mere leaks.
+ pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
+ ObjCARCReferenceLifetimeType, *destroyer, true);
+ break;
+ }
+ }
+ }
+
return RValue::get(Value);
}
@@ -402,8 +516,7 @@ void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
// This needs to be to the standard address space.
Address = Builder.CreateBitCast(Address, Int8PtrTy);
- const llvm::Type *IntPtrT = IntPtrTy;
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, &IntPtrT, 1);
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);
// In time, people may want to control this and use a 1 here.
llvm::Value *Arg = Builder.getFalse();
@@ -592,6 +705,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
case Expr::OpaqueValueExprClass:
return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
+ case Expr::SubstNonTypeTemplateParmExprClass:
+ return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
case Expr::ImplicitCastExprClass:
case Expr::CStyleCastExprClass:
case Expr::CXXFunctionalCastExprClass:
@@ -599,10 +714,20 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::CXXDynamicCastExprClass:
case Expr::CXXReinterpretCastExprClass:
case Expr::CXXConstCastExprClass:
+ case Expr::ObjCBridgedCastExprClass:
return EmitCastLValue(cast<CastExpr>(E));
+
+ case Expr::MaterializeTemporaryExprClass:
+ return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
}
}
+llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
+ return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
+ lvalue.getAlignment(), lvalue.getType(),
+ lvalue.getTBAAInfo());
+}
+
llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
unsigned Alignment, QualType Ty,
llvm::MDNode *TBAAInfo) {
@@ -651,6 +776,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
QualType Ty,
llvm::MDNode *TBAAInfo) {
Value = EmitToMemory(Value, Ty);
+
llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
if (Alignment)
Store->setAlignment(Alignment);
@@ -658,29 +784,30 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
CGM.DecorateInstruction(Store, TBAAInfo);
}
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue) {
+ EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
+ lvalue.getAlignment(), lvalue.getType(),
+ lvalue.getTBAAInfo());
+}
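Together with the EmitLoadOfScalar overload above, this lets callers build an LValue once and have it carry volatility, alignment, and TBAA information in both directions. A minimal sketch (an illustrative helper, not part of the patch):

static void copyScalar(CodeGenFunction &CGF, LValue src, LValue dst) {
  // Both calls pick up volatility, alignment, and TBAA info from the LValues.
  llvm::Value *v = CGF.EmitLoadOfScalar(src);
  CGF.EmitStoreOfScalar(v, dst);
}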
+
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
-RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
+RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
if (LV.isObjCWeak()) {
// load of a __weak object.
llvm::Value *AddrWeakObj = LV.getAddress();
return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
AddrWeakObj));
}
+ if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak)
+ return RValue::get(EmitARCLoadWeak(LV.getAddress()));
if (LV.isSimple()) {
- llvm::Value *Ptr = LV.getAddress();
-
- // Functions are l-values that don't require loading.
- if (ExprType->isFunctionType())
- return RValue::get(Ptr);
+ assert(!LV.getType()->isFunctionType());
// Everything needs a load.
- return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
- LV.getAlignment(), ExprType,
- LV.getTBAAInfo()));
-
+ return RValue::get(EmitLoadOfScalar(LV));
}
if (LV.isVectorElt()) {
@@ -693,21 +820,20 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
// If this is a reference to a subset of the elements of a vector, either
// shuffle the input or extract/insert them as appropriate.
if (LV.isExtVectorElt())
- return EmitLoadOfExtVectorElementLValue(LV, ExprType);
+ return EmitLoadOfExtVectorElementLValue(LV);
if (LV.isBitField())
- return EmitLoadOfBitfieldLValue(LV, ExprType);
+ return EmitLoadOfBitfieldLValue(LV);
assert(LV.isPropertyRef() && "Unknown LValue type!");
return EmitLoadOfPropertyRefLValue(LV);
}
-RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
- QualType ExprType) {
+RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
const CGBitFieldInfo &Info = LV.getBitFieldInfo();
// Get the output type.
- const llvm::Type *ResLTy = ConvertType(ExprType);
+ const llvm::Type *ResLTy = ConvertType(LV.getType());
unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
// Compute the result as an OR of all of the individual component accesses.
@@ -733,7 +859,7 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
// Cast to the access type.
const llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(),
AI.AccessWidth,
- CGM.getContext().getTargetAddressSpace(ExprType));
+ CGM.getContext().getTargetAddressSpace(LV.getType()));
Ptr = Builder.CreateBitCast(Ptr, PTy);
// Perform the load.
@@ -777,8 +903,7 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
-RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
- QualType ExprType) {
+RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
LV.isVolatileQualified(), "tmp");
@@ -786,7 +911,7 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
// If the result of the expression is a non-vector type, we must be extracting
// a single element. Just codegen as an extractelement.
- const VectorType *ExprVT = ExprType->getAs<VectorType>();
+ const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
if (!ExprVT) {
unsigned InIdx = getAccessedFieldNo(0, Elts);
llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
@@ -813,8 +938,7 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to the have the same type, and that type
/// is 'Ty'.
-void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
- QualType Ty) {
+void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
if (!Dst.isSimple()) {
if (Dst.isVectorElt()) {
// Read/modify/write the vector, inserting the new element.
@@ -829,15 +953,41 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
// If this is an update of extended vector elements, insert them as
// appropriate.
if (Dst.isExtVectorElt())
- return EmitStoreThroughExtVectorComponentLValue(Src, Dst, Ty);
+ return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
if (Dst.isBitField())
- return EmitStoreThroughBitfieldLValue(Src, Dst, Ty);
+ return EmitStoreThroughBitfieldLValue(Src, Dst);
assert(Dst.isPropertyRef() && "Unknown LValue type");
return EmitStoreThroughPropertyRefLValue(Src, Dst);
}
+ // There's special magic for assigning into an ARC-qualified l-value.
+ if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
+ switch (Lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing special
+ break;
+
+ case Qualifiers::OCL_Strong:
+ EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
+ return;
+
+ case Qualifiers::OCL_Weak:
+ EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
+ return;
+
+ case Qualifiers::OCL_Autoreleasing:
+ Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
+ Src.getScalarVal()));
+ // fall into the normal path
+ break;
+ }
+ }
+
if (Dst.isObjCWeak() && !Dst.isNonGC()) {
// load of a __weak object.
llvm::Value *LvalueDst = Dst.getAddress();
@@ -871,24 +1021,21 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
}
assert(Src.isScalar() && "Can't emit an agg store with this method");
- EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(),
- Dst.isVolatileQualified(), Dst.getAlignment(), Ty,
- Dst.getTBAAInfo());
+ EmitStoreOfScalar(Src.getScalarVal(), Dst);
}
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
- QualType Ty,
llvm::Value **Result) {
const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
// Get the output type.
- const llvm::Type *ResLTy = ConvertTypeForMem(Ty);
+ const llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
// Get the source value, truncated to the width of the bit-field.
llvm::Value *SrcVal = Src.getScalarVal();
- if (Ty->isBooleanType())
+ if (Dst.getType()->isBooleanType())
SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);
SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
@@ -983,8 +1130,7 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
}
void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
- LValue Dst,
- QualType Ty) {
+ LValue Dst) {
// This access turns into a read/modify/write of the vector. Load the input
// value now.
llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
@@ -993,7 +1139,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
llvm::Value *SrcVal = Src.getScalarVal();
- if (const VectorType *VTy = Ty->getAs<VectorType>()) {
+ if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
unsigned NumSrcElts = VTy->getNumElements();
unsigned NumDstElts =
cast<llvm::VectorType>(Vec->getType())->getNumElements();
@@ -1113,7 +1259,12 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
return;
}
-
+
+ if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+ return;
+ }
+
if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
if (LV.isObjCIvar() && !LV.isObjCArray())
@@ -1136,6 +1287,14 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
}
}
+static llvm::Value *
+EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
+ llvm::Value *V, llvm::Type *IRType,
+ llvm::StringRef Name = llvm::StringRef()) {
+ unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
+ return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
+}
+
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
const Expr *E, const VarDecl *VD) {
assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
@@ -1144,6 +1303,10 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
if (VD->getType()->isReferenceType())
V = CGF.Builder.CreateLoad(V, "tmp");
+
+ V = EmitBitCastOfLValueToProperType(CGF, V,
+ CGF.getTypes().ConvertTypeForMem(E->getType()));
+
unsigned Alignment = CGF.getContext().getDeclAlign(VD).getQuantity();
LValue LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
setObjCGCLValueClass(CGF.getContext(), E, LV);
@@ -1151,7 +1314,7 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
}
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
- const Expr *E, const FunctionDecl *FD) {
+ const Expr *E, const FunctionDecl *FD) {
llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
if (!FD->hasPrototype()) {
if (const FunctionProtoType *Proto =
@@ -1200,6 +1363,9 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
if (VD->getType()->isReferenceType())
V = Builder.CreateLoad(V, "tmp");
+ V = EmitBitCastOfLValueToProperType(*this, V,
+ getTypes().ConvertTypeForMem(E->getType()));
+
LValue LV = MakeAddrLValue(V, E->getType(), Alignment);
if (NonGCable) {
LV.getQuals().removeObjCGCAttr();
@@ -1359,7 +1525,7 @@ llvm::BasicBlock *CodeGenFunction::getTrapBB() {
TrapBB = createBasicBlock("trap");
EmitBlock(TrapBB);
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap, 0, 0);
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
llvm::CallInst *TrapCall = Builder.CreateCall(F);
TrapCall->setDoesNotReturn();
TrapCall->setDoesNotThrow();
@@ -1400,7 +1566,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
return LValue::MakeVectorElt(LHS.getAddress(), Idx,
- E->getBase()->getType().getCVRQualifiers());
+ E->getBase()->getType());
}
// Extend or truncate the index type to 32 or 64-bits.
@@ -1430,21 +1596,27 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// size is a VLA or Objective-C interface.
llvm::Value *Address = 0;
unsigned ArrayAlignment = 0;
- if (const VariableArrayType *VAT =
+ if (const VariableArrayType *vla =
getContext().getAsVariableArrayType(E->getType())) {
- llvm::Value *VLASize = GetVLASize(VAT);
-
- Idx = Builder.CreateMul(Idx, VLASize);
-
- // The base must be a pointer, which is not an aggregate. Emit it.
- llvm::Value *Base = EmitScalarExpr(E->getBase());
-
- Address = EmitCastToVoidPtr(Base);
- if (getContext().getLangOptions().isSignedOverflowDefined())
+ // The base must be a pointer, which is not an aggregate. Emit
+ // it. It needs to be emitted first in case it's what captures
+ // the VLA bounds.
+ Address = EmitScalarExpr(E->getBase());
+
+ // The element count here is the total number of non-VLA elements.
+ llvm::Value *numElements = getVLASize(vla).first;
+
+ // Effectively, the multiply by the VLA size is part of the GEP.
+ // GEP indexes are signed, and scaling an index isn't permitted to
+ // signed-overflow, so we use the same semantics for our explicit
+ // multiply. We suppress this if overflow is not undefined behavior.
+ if (getLangOptions().isSignedOverflowDefined()) {
+ Idx = Builder.CreateMul(Idx, numElements);
Address = Builder.CreateGEP(Address, Idx, "arrayidx");
- else
+ } else {
+ Idx = Builder.CreateNSWMul(Idx, numElements);
Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
- Address = Builder.CreateBitCast(Address, Base->getType());
+ }
} else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
// Indexing over an interface, as in "NSString *P; P[4];"
llvm::Value *InterfaceSize =
@@ -1539,7 +1711,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
Base = EmitLValue(E->getBase());
} else {
// Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
- assert(E->getBase()->getType()->getAs<VectorType>() &&
+ assert(E->getBase()->getType()->isVectorType() &&
"Result must be a vector");
llvm::Value *Vec = EmitScalarExpr(E->getBase());
@@ -1548,6 +1720,9 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
Builder.CreateStore(Vec, VecMem);
Base = MakeAddrLValue(VecMem, E->getBase()->getType());
}
+
+ QualType type =
+ E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
// Encode the element access list into a vector of unsigned indices.
llvm::SmallVector<unsigned, 4> Indices;
@@ -1555,8 +1730,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
if (Base.isSimple()) {
llvm::Constant *CV = GenerateConstantVector(getLLVMContext(), Indices);
- return LValue::MakeExtVectorElt(Base.getAddress(), CV,
- Base.getVRQualifiers());
+ return LValue::MakeExtVectorElt(Base.getAddress(), CV, type);
}
assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
@@ -1570,8 +1744,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i])));
}
llvm::Constant *CV = llvm::ConstantVector::get(CElts);
- return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV,
- Base.getVRQualifiers());
+ return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type);
}
LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
@@ -1622,7 +1795,7 @@ LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value *BaseValue,
CGM.getTypes().getCGRecordLayout(Field->getParent());
const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
return LValue::MakeBitfield(BaseValue, Info,
- Field->getType().getCVRQualifiers()|CVRQualifiers);
+ Field->getType().withCVRQualifiers(CVRQualifiers));
}
/// EmitLValueForAnonRecordField - Given that the field is a member of
@@ -1656,20 +1829,14 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
bool mayAlias = rec->hasAttr<MayAliasAttr>();
- llvm::Value *addr;
+ llvm::Value *addr = baseAddr;
if (rec->isUnion()) {
- // For unions, we just cast to the appropriate type.
+ // For unions, there is no pointer adjustment.
assert(!type->isReferenceType() && "union has reference member");
-
- const llvm::Type *llvmType = CGM.getTypes().ConvertTypeForMem(type);
- unsigned AS =
- cast<llvm::PointerType>(baseAddr->getType())->getAddressSpace();
- addr = Builder.CreateBitCast(baseAddr, llvmType->getPointerTo(AS),
- field->getName());
} else {
// For structs, we GEP to the field that the record layout suggests.
unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
- addr = Builder.CreateStructGEP(baseAddr, idx, field->getName());
+ addr = Builder.CreateStructGEP(addr, idx, field->getName());
// If this is a reference field, load the reference right now.
if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
@@ -1691,6 +1858,14 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
cvr = 0; // qualifiers don't recursively apply to referencee
}
}
+
+  // Make sure that the address is pointing to the right type. This is critical
+  // for both unions and structs: a union always needs a bitcast, and a struct
+  // element needs one whenever the LLVM type we laid out doesn't match the
+  // desired type.
+ addr = EmitBitCastOfLValueToProperType(*this, addr,
+ CGM.getTypes().ConvertTypeForMem(type),
+ field->getName());
unsigned alignment = getContext().getDeclAlign(field).getQuantity();
LValue LV = MakeAddrLValue(addr, type, alignment);
@@ -1722,9 +1897,17 @@ CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value *BaseValue,
CGM.getTypes().getCGRecordLayout(Field->getParent());
unsigned idx = RL.getLLVMFieldNo(Field);
llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");
-
assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
+
+  // Make sure that the address is pointing to the right type. This is critical
+  // for both unions and structs: a union always needs a bitcast, and a struct
+  // element needs one whenever the LLVM type we laid out doesn't match the
+  // desired type.
+ const llvm::Type *llvmType = ConvertTypeForMem(FieldType);
+ unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
+ V = Builder.CreateBitCast(V, llvmType->getPointerTo(AS));
+
unsigned Alignment = getContext().getDeclAlign(Field).getQuantity();
return MakeAddrLValue(V, FieldType, Alignment);
}
@@ -1734,7 +1917,8 @@ LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
const Expr *InitExpr = E->getInitializer();
LValue Result = MakeAddrLValue(DeclPtr, E->getType());
- EmitAnyExprToMem(InitExpr, DeclPtr, /*Volatile*/ false, /*Init*/ true);
+ EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
+ /*Init*/ true);
return Result;
}
@@ -1863,13 +2047,16 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_DerivedToBaseMemberPointer:
case CK_BaseToDerivedMemberPointer:
case CK_MemberPointerToBoolean:
- case CK_AnyPointerToBlockPointerCast: {
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCProduceObject:
+ case CK_ObjCConsumeObject:
+ case CK_ObjCReclaimReturnedObject: {
// These casts only produce lvalues when we're binding a reference to a
// temporary realized from a (converted) pure rvalue. Emit the expression
// as a value, copy it into a temporary, and return an lvalue referring to
// that temporary.
llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
- EmitAnyExprToMem(E, V, false, false);
+ EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false);
return MakeAddrLValue(V, E->getType());
}
@@ -1954,11 +2141,18 @@ LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
return getOpaqueLValueMapping(e);
}
+LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
+ const MaterializeTemporaryExpr *E) {
+ RValue RV = EmitReferenceBindingToExpr(E->GetTemporaryExpr(),
+ /*InitializedDecl=*/0);
+ return MakeAddrLValue(RV.getScalarVal(), E->getType());
+}
+
+
//===--------------------------------------------------------------------===//
// Expression Emission
//===--------------------------------------------------------------------===//
-
RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue) {
if (CGDebugInfo *DI = getDebugInfo()) {
@@ -1988,13 +2182,57 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
- if (isa<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
- // C++ [expr.pseudo]p1:
- // The result shall only be used as the operand for the function call
- // operator (), and the result of such a call has type void. The only
- // effect is the evaluation of the postfix-expression before the dot or
- // arrow.
- EmitScalarExpr(E->getCallee());
+ if (const CXXPseudoDestructorExpr *PseudoDtor
+ = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
+ QualType DestroyedType = PseudoDtor->getDestroyedType();
+ if (getContext().getLangOptions().ObjCAutoRefCount &&
+ DestroyedType->isObjCLifetimeType() &&
+ (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
+ DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
+ // Automatic Reference Counting:
+ // If the pseudo-expression names a retainable object with weak or
+ // strong lifetime, the object shall be released.
+ Expr *BaseExpr = PseudoDtor->getBase();
+ llvm::Value *BaseValue = NULL;
+ Qualifiers BaseQuals;
+
+ // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
+ if (PseudoDtor->isArrow()) {
+ BaseValue = EmitScalarExpr(BaseExpr);
+ const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
+ BaseQuals = PTy->getPointeeType().getQualifiers();
+ } else {
+ LValue BaseLV = EmitLValue(BaseExpr);
+ BaseValue = BaseLV.getAddress();
+ QualType BaseTy = BaseExpr->getType();
+ BaseQuals = BaseTy.getQualifiers();
+ }
+
+ switch (PseudoDtor->getDestroyedType().getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong:
+ EmitARCRelease(Builder.CreateLoad(BaseValue,
+ PseudoDtor->getDestroyedType().isVolatileQualified()),
+ /*precise*/ true);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ EmitARCDestroyWeak(BaseValue);
+ break;
+ }
+ } else {
+ // C++ [expr.pseudo]p1:
+ // The result shall only be used as the operand for the function call
+ // operator (), and the result of such a call has type void. The only
+ // effect is the evaluation of the postfix-expression before the dot or
+ // arrow.
+ EmitScalarExpr(E->getCallee());
+ }
+
return RValue::get(0);
}
@@ -2016,12 +2254,28 @@ LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
return EmitPointerToDataMemberBinaryExpr(E);
assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
+
+ // Note that in all of these cases, __block variables need the RHS
+ // evaluated first just in case the variable gets moved by the RHS.
if (!hasAggregateLLVMType(E->getType())) {
- // __block variables need the RHS evaluated first.
+ switch (E->getLHS()->getType().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ return EmitARCStoreStrong(E, /*ignored*/ false).first;
+
+ case Qualifiers::OCL_Autoreleasing:
+ return EmitARCStoreAutoreleasing(E).first;
+
+ // No reason to do any of these differently.
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Weak:
+ break;
+ }
+
RValue RV = EmitAnyExpr(E->getRHS());
LValue LV = EmitLValue(E->getLHS());
- EmitStoreThroughLValue(RV, LV, E->getType());
+ EmitStoreThroughLValue(RV, LV);
return LV;
}
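
Note on the assignment hunk above: under ARC, only strong and autoreleasing l-values take a dedicated store path; everything else still evaluates the RHS first (which matters for __block variables the RHS may move) and then stores through the l-value. A minimal standalone sketch of that dispatch, with illustrative names rather than Clang's API:

enum class Lifetime { None, ExplicitNone, Strong, Weak, Autoreleasing };

// Mirrors the switch added to EmitBinaryOperatorLValue: the store strategy is
// picked from the lifetime qualifier of the assignment's left-hand side.
const char *scalarAssignStrategy(Lifetime lhs) {
  switch (lhs) {
  case Lifetime::Strong:        return "EmitARCStoreStrong";
  case Lifetime::Autoreleasing: return "EmitARCStoreAutoreleasing";
  default:                      return "EmitAnyExpr(RHS), then EmitStoreThroughLValue";
  }
}
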
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index d8da642..915ffd6 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -85,15 +85,16 @@ public:
Visit(GE->getResultExpr());
}
void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
+ void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
+ return Visit(E->getReplacement());
+ }
// l-values.
void VisitDeclRefExpr(DeclRefExpr *DRE) { EmitAggLoadOfLValue(DRE); }
void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
- void VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
- EmitAggLoadOfLValue(E);
- }
+ void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
EmitAggLoadOfLValue(E);
}
@@ -131,13 +132,13 @@ public:
void VisitExprWithCleanups(ExprWithCleanups *E);
void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
-
+ void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
void VisitOpaqueValueExpr(OpaqueValueExpr *E);
void VisitVAArgExpr(VAArgExpr *E);
- void EmitInitializationToLValue(Expr *E, LValue Address, QualType T);
- void EmitNullInitializationToLValue(LValue Address, QualType T);
+ void EmitInitializationToLValue(Expr *E, LValue Address);
+ void EmitNullInitializationToLValue(LValue Address);
// case Expr::ChooseExprClass:
void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
};
@@ -243,10 +244,31 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
// Visitor Methods
//===----------------------------------------------------------------------===//
+void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
+ Visit(E->GetTemporaryExpr());
+}
+
void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
EmitFinalDestCopy(e, CGF.getOpaqueLValueMapping(e));
}
+void
+AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ if (E->getType().isPODType(CGF.getContext())) {
+ // For a POD type, just emit a load of the lvalue + a copy, because our
+ // compound literal might alias the destination.
+ // FIXME: This is a band-aid; the real problem appears to be in our handling
+ // of assignments, where we store directly into the LHS without checking
+ // whether anything in the RHS aliases.
+ EmitAggLoadOfLValue(E);
+ return;
+ }
+
+ AggValueSlot Slot = EnsureSlot(E->getType());
+ CGF.EmitAggExpr(E->getInitializer(), Slot);
+}
+
+
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
switch (E->getCastKind()) {
case CK_Dynamic: {
@@ -271,8 +293,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
QualType PtrTy = CGF.getContext().getPointerType(Ty);
llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
CGF.ConvertType(PtrTy));
- EmitInitializationToLValue(E->getSubExpr(), CGF.MakeAddrLValue(CastPtr, Ty),
- Ty);
+ EmitInitializationToLValue(E->getSubExpr(),
+ CGF.MakeAddrLValue(CastPtr, Ty));
break;
}
@@ -339,6 +361,9 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_IntegralComplexToBoolean:
case CK_IntegralComplexCast:
case CK_IntegralComplexToFloatingComplex:
+ case CK_ObjCProduceObject:
+ case CK_ObjCConsumeObject:
+ case CK_ObjCReclaimReturnedObject:
llvm_unreachable("cast kind invalid for aggregate types");
}
}
@@ -519,13 +544,13 @@ void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
QualType T = E->getType();
AggValueSlot Slot = EnsureSlot(T);
- EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T), T);
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}
void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
QualType T = E->getType();
AggValueSlot Slot = EnsureSlot(T);
- EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T), T);
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}
/// isSimpleZero - If emitting this value will obviously just cause a store of
@@ -557,41 +582,46 @@ static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
void
-AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV, QualType T) {
+AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
+ QualType type = LV.getType();
// FIXME: Ignore result?
// FIXME: Are initializers affected by volatile?
if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
     // Storing "i32 0" to a zeroed memory location is a no-op.
} else if (isa<ImplicitValueInitExpr>(E)) {
- EmitNullInitializationToLValue(LV, T);
- } else if (T->isReferenceType()) {
+ EmitNullInitializationToLValue(LV);
+ } else if (type->isReferenceType()) {
RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
- CGF.EmitStoreThroughLValue(RV, LV, T);
- } else if (T->isAnyComplexType()) {
+ CGF.EmitStoreThroughLValue(RV, LV);
+ } else if (type->isAnyComplexType()) {
CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
- } else if (CGF.hasAggregateLLVMType(T)) {
- CGF.EmitAggExpr(E, AggValueSlot::forAddr(LV.getAddress(), false, true,
- false, Dest.isZeroed()));
+ } else if (CGF.hasAggregateLLVMType(type)) {
+ CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV, true, false,
+ Dest.isZeroed()));
+ } else if (LV.isSimple()) {
+ CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
} else {
- CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV, T);
+ CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
}
}
-void AggExprEmitter::EmitNullInitializationToLValue(LValue LV, QualType T) {
+void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
+ QualType type = lv.getType();
+
// If the destination slot is already zeroed out before the aggregate is
// copied into it, we don't have to emit any zeros here.
- if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(T))
+ if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
return;
- if (!CGF.hasAggregateLLVMType(T)) {
+ if (!CGF.hasAggregateLLVMType(type)) {
// For non-aggregates, we can store zero
- llvm::Value *Null = llvm::Constant::getNullValue(CGF.ConvertType(T));
- CGF.EmitStoreThroughLValue(RValue::get(Null), LV, T);
+ llvm::Value *null = llvm::Constant::getNullValue(CGF.ConvertType(type));
+ CGF.EmitStoreThroughLValue(RValue::get(null), lv);
} else {
// There's a potential optimization opportunity in combining
// memsets; that would be easy for arrays, but relatively
// difficult for structures with the current code.
- CGF.EmitNullInitialization(LV.getAddress(), T);
+ CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
}
}
@@ -634,45 +664,135 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
}
uint64_t NumArrayElements = AType->getNumElements();
- QualType ElementType = CGF.getContext().getCanonicalType(E->getType());
- ElementType = CGF.getContext().getAsArrayType(ElementType)->getElementType();
-
- bool hasNonTrivialCXXConstructor = false;
- if (CGF.getContext().getLangOptions().CPlusPlus)
- if (const RecordType *RT = CGF.getContext()
- .getBaseElementType(ElementType)->getAs<RecordType>()) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- hasNonTrivialCXXConstructor = !RD->hasTrivialDefaultConstructor();
+ assert(NumInitElements <= NumArrayElements);
+
+ QualType elementType = E->getType().getCanonicalType();
+ elementType = CGF.getContext().getQualifiedType(
+ cast<ArrayType>(elementType)->getElementType(),
+ elementType.getQualifiers() + Dest.getQualifiers());
+
+ // DestPtr is an array*. Construct an elementType* by drilling
+ // down a level.
+ llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
+ llvm::Value *indices[] = { zero, zero };
+ llvm::Value *begin =
+ Builder.CreateInBoundsGEP(DestPtr, indices, indices+2, "arrayinit.begin");
+
+ // Exception safety requires us to destroy all the
+ // already-constructed members if an initializer throws.
+ // For that, we'll need an EH cleanup.
+ QualType::DestructionKind dtorKind = elementType.isDestructedType();
+ llvm::AllocaInst *endOfInit = 0;
+ EHScopeStack::stable_iterator cleanup;
+ if (CGF.needsEHCleanup(dtorKind)) {
+ // In principle we could tell the cleanup where we are more
+ // directly, but the control flow can get so varied here that it
+ // would actually be quite complex. Therefore we go through an
+ // alloca.
+ endOfInit = CGF.CreateTempAlloca(begin->getType(),
+ "arrayinit.endOfInit");
+ Builder.CreateStore(begin, endOfInit);
+ CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
+ CGF.getDestroyer(dtorKind));
+ cleanup = CGF.EHStack.stable_begin();
+
+ // Otherwise, remember that we didn't need a cleanup.
+ } else {
+ dtorKind = QualType::DK_none;
+ }
+
+ llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
+
+ // The 'current element to initialize'. The invariants on this
+ // variable are complicated. Essentially, after each iteration of
+ // the loop, it points to the last initialized element, except
+ // that it points to the beginning of the array before any
+ // elements have been initialized.
+ llvm::Value *element = begin;
+
+ // Emit the explicit initializers.
+ for (uint64_t i = 0; i != NumInitElements; ++i) {
+ // Advance to the next element.
+ if (i > 0) {
+ element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");
+
+ // Tell the cleanup that it needs to destroy up to this
+ // element. TODO: some of these stores can be trivially
+ // observed to be unnecessary.
+ if (endOfInit) Builder.CreateStore(element, endOfInit);
}
- // FIXME: were we intentionally ignoring address spaces and GC attributes?
+ LValue elementLV = CGF.MakeAddrLValue(element, elementType);
+ EmitInitializationToLValue(E->getInit(i), elementLV);
+ }
- for (uint64_t i = 0; i != NumArrayElements; ++i) {
- // If we're done emitting initializers and the destination is known-zeroed
- // then we're done.
- if (i == NumInitElements &&
- Dest.isZeroed() &&
- CGF.getTypes().isZeroInitializable(ElementType) &&
- !hasNonTrivialCXXConstructor)
- break;
+ // Check whether there's a non-trivial array-fill expression.
+ // Note that this will be a CXXConstructExpr even if the element
+ // type is an array (or array of array, etc.) of class type.
+ Expr *filler = E->getArrayFiller();
+ bool hasTrivialFiller = true;
+ if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
+ assert(cons->getConstructor()->isDefaultConstructor());
+ hasTrivialFiller = cons->getConstructor()->isTrivial();
+ }
- llvm::Value *NextVal = Builder.CreateStructGEP(DestPtr, i, ".array");
- LValue LV = CGF.MakeAddrLValue(NextVal, ElementType);
-
- if (i < NumInitElements)
- EmitInitializationToLValue(E->getInit(i), LV, ElementType);
- else if (Expr *filler = E->getArrayFiller())
- EmitInitializationToLValue(filler, LV, ElementType);
+ // Any remaining elements need to be zero-initialized, possibly
+  // using the filler expression.  We can skip this if we're
+ // emitting to zeroed memory.
+ if (NumInitElements != NumArrayElements &&
+ !(Dest.isZeroed() && hasTrivialFiller &&
+ CGF.getTypes().isZeroInitializable(elementType))) {
+
+ // Use an actual loop. This is basically
+ // do { *array++ = filler; } while (array != end);
+
+ // Advance to the start of the rest of the array.
+ if (NumInitElements) {
+ element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
+ if (endOfInit) Builder.CreateStore(element, endOfInit);
+ }
+
+ // Compute the end of the array.
+ llvm::Value *end = Builder.CreateInBoundsGEP(begin,
+ llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
+ "arrayinit.end");
+
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
+
+ // Jump into the body.
+ CGF.EmitBlock(bodyBB);
+ llvm::PHINode *currentElement =
+ Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
+ currentElement->addIncoming(element, entryBB);
+
+ // Emit the actual filler expression.
+ LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
+ if (filler)
+ EmitInitializationToLValue(filler, elementLV);
else
- EmitNullInitializationToLValue(LV, ElementType);
-
- // If the GEP didn't get used because of a dead zero init or something
- // else, clean it up for -O0 builds and general tidiness.
- if (llvm::GetElementPtrInst *GEP =
- dyn_cast<llvm::GetElementPtrInst>(NextVal))
- if (GEP->use_empty())
- GEP->eraseFromParent();
+ EmitNullInitializationToLValue(elementLV);
+
+ // Move on to the next element.
+ llvm::Value *nextElement =
+ Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");
+
+ // Tell the EH cleanup that we finished with the last element.
+ if (endOfInit) Builder.CreateStore(nextElement, endOfInit);
+
+ // Leave the loop if we're done.
+ llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
+ "arrayinit.done");
+ llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
+ Builder.CreateCondBr(done, endBB, bodyBB);
+ currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
+
+ CGF.EmitBlock(endBB);
}
+
+ // Leave the partial-array cleanup if we entered one.
+ if (dtorKind) CGF.DeactivateCleanupBlock(cleanup);
+
return;
}
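
To make the exception-safety of the new array-initialization loop concrete, here is a small standalone C++ sketch (not Clang code) of the semantics the emitted IR implements: the arrayinit.endOfInit slot records how far construction got, so a throwing initializer destroys only the already-constructed prefix, in reverse order.

#include <cstddef>
#include <new>

template <class T, class Init>
void constructArray(T *begin, std::size_t n, Init init) {
  T *constructed = begin;                 // plays the role of arrayinit.endOfInit
  try {
    for (T *p = begin; p != begin + n; ++p) {
      ::new (static_cast<void *>(p)) T(init(p - begin));
      constructed = p + 1;                // advance the cleanup marker
    }
  } catch (...) {
    while (constructed != begin)          // partial-array cleanup
      (--constructed)->~T();
    throw;
  }
}
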
@@ -683,9 +803,9 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// the disadvantage is that the generated code is more difficult for
// the optimizer, especially with bitfields.
unsigned NumInitElements = E->getNumInits();
- RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
+ RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
- if (E->getType()->isUnionType()) {
+ if (record->isUnion()) {
// Only initialize one field of a union. The field itself is
// specified by the initializer list.
if (!E->getInitializedFieldInUnion()) {
@@ -694,8 +814,8 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#ifndef NDEBUG
      // Make sure that it's really empty and not a failure of
// semantic analysis.
- for (RecordDecl::field_iterator Field = SD->field_begin(),
- FieldEnd = SD->field_end();
+ for (RecordDecl::field_iterator Field = record->field_begin(),
+ FieldEnd = record->field_end();
Field != FieldEnd; ++Field)
assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
@@ -708,55 +828,81 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
if (NumInitElements) {
// Store the initializer into the field
- EmitInitializationToLValue(E->getInit(0), FieldLoc, Field->getType());
+ EmitInitializationToLValue(E->getInit(0), FieldLoc);
} else {
// Default-initialize to null.
- EmitNullInitializationToLValue(FieldLoc, Field->getType());
+ EmitNullInitializationToLValue(FieldLoc);
}
return;
}
+ // We'll need to enter cleanup scopes in case any of the member
+ // initializers throw an exception.
+ llvm::SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
+
// Here we iterate over the fields; this makes it simpler to both
// default-initialize fields and skip over unnamed fields.
- unsigned CurInitVal = 0;
- for (RecordDecl::field_iterator Field = SD->field_begin(),
- FieldEnd = SD->field_end();
- Field != FieldEnd; ++Field) {
- // We're done once we hit the flexible array member
- if (Field->getType()->isIncompleteArrayType())
+ unsigned curInitIndex = 0;
+ for (RecordDecl::field_iterator field = record->field_begin(),
+ fieldEnd = record->field_end();
+ field != fieldEnd; ++field) {
+ // We're done once we hit the flexible array member.
+ if (field->getType()->isIncompleteArrayType())
break;
- if (Field->isUnnamedBitfield())
+ // Always skip anonymous bitfields.
+ if (field->isUnnamedBitfield())
continue;
- // Don't emit GEP before a noop store of zero.
- if (CurInitVal == NumInitElements && Dest.isZeroed() &&
+ // We're done if we reach the end of the explicit initializers, we
+ // have a zeroed object, and the rest of the fields are
+ // zero-initializable.
+ if (curInitIndex == NumInitElements && Dest.isZeroed() &&
CGF.getTypes().isZeroInitializable(E->getType()))
break;
// FIXME: volatility
- LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, *Field, 0);
+ LValue LV = CGF.EmitLValueForFieldInitialization(DestPtr, *field, 0);
     // We never generate write-barriers for initialized fields.
- FieldLoc.setNonGC(true);
+ LV.setNonGC(true);
- if (CurInitVal < NumInitElements) {
+ if (curInitIndex < NumInitElements) {
// Store the initializer into the field.
- EmitInitializationToLValue(E->getInit(CurInitVal++), FieldLoc,
- Field->getType());
+ EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
} else {
      // We're out of initializers; default-initialize to null.
- EmitNullInitializationToLValue(FieldLoc, Field->getType());
+ EmitNullInitializationToLValue(LV);
+ }
+
+ // Push a destructor if necessary.
+ // FIXME: if we have an array of structures, all explicitly
+ // initialized, we can end up pushing a linear number of cleanups.
+ bool pushedCleanup = false;
+ if (QualType::DestructionKind dtorKind
+ = field->getType().isDestructedType()) {
+ assert(LV.isSimple());
+ if (CGF.needsEHCleanup(dtorKind)) {
+ CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
+ CGF.getDestroyer(dtorKind), false);
+ cleanups.push_back(CGF.EHStack.stable_begin());
+ pushedCleanup = true;
+ }
}
// If the GEP didn't get used because of a dead zero init or something
// else, clean it up for -O0 builds and general tidiness.
- if (FieldLoc.isSimple())
+ if (!pushedCleanup && LV.isSimple())
if (llvm::GetElementPtrInst *GEP =
- dyn_cast<llvm::GetElementPtrInst>(FieldLoc.getAddress()))
+ dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
if (GEP->use_empty())
GEP->eraseFromParent();
}
+
+ // Deactivate all the partial cleanups in reverse order, which
+ // generally means popping them.
+ for (unsigned i = cleanups.size(); i != 0; --i)
+ CGF.DeactivateCleanupBlock(cleanups[i-1]);
}
//===----------------------------------------------------------------------===//
@@ -873,8 +1019,6 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
///
/// \param IsInitializer - true if this evaluation is initializing an
/// object whose lifetime is already being managed.
-//
-// FIXME: Take Qualifiers object.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
bool IgnoreResult) {
assert(E && hasAggregateLLVMType(E->getType()) &&
@@ -892,7 +1036,7 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
llvm::Value *Temp = CreateMemTemp(E->getType());
LValue LV = MakeAddrLValue(Temp, E->getType());
- EmitAggExpr(E, AggValueSlot::forAddr(Temp, LV.isVolatileQualified(), false));
+ EmitAggExpr(E, AggValueSlot::forLValue(LV, false));
return LV;
}
@@ -954,7 +1098,10 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
SrcPtr = Builder.CreateBitCast(SrcPtr, SBP, "tmp");
- if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
+ // Don't do any of the memmove_collectable tests if GC isn't set.
+ if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC) {
+ // fall through
+ } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
RecordDecl *Record = RecordTy->getDecl();
if (Record->hasObjectMember()) {
CharUnits size = TypeInfo.first;
@@ -964,7 +1111,7 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
SizeVal);
return;
}
- } else if (getContext().getAsArrayType(Ty)) {
+ } else if (Ty->isArrayType()) {
QualType BaseType = getContext().getBaseElementType(Ty);
if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
if (RecordTy->getDecl()->hasObjectMember()) {
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index 81fee67..4396f56 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -379,19 +379,11 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
}
}
- const ConstantArrayType *Array
- = getContext().getAsConstantArrayType(E->getType());
- if (Array) {
- QualType BaseElementTy = getContext().getBaseElementType(Array);
- const llvm::Type *BasePtr = ConvertType(BaseElementTy);
- BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr =
- Builder.CreateBitCast(Dest.getAddr(), BasePtr);
-
- EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
+ if (const ConstantArrayType *arrayType
+ = getContext().getAsConstantArrayType(E->getType())) {
+ EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
E->arg_begin(), E->arg_end());
- }
- else {
+ } else {
CXXCtorType Type = Ctor_Complete;
bool ForVirtualBase = false;
@@ -619,10 +611,8 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
// can be ignored because the result shouldn't be used if
// allocation fails.
if (typeSizeMultiplier != 1) {
- const llvm::Type *intrinsicTypes[] = { CGF.SizeTy };
llvm::Value *umul_with_overflow
- = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow,
- intrinsicTypes, 1);
+ = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);
llvm::Value *tsmV =
llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
@@ -661,10 +651,8 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
if (cookieSize != 0) {
sizeWithoutCookie = size;
- const llvm::Type *intrinsicTypes[] = { CGF.SizeTy };
llvm::Value *uadd_with_overflow
- = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow,
- intrinsicTypes, 1);
+ = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);
llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
llvm::Value *result =
@@ -707,16 +695,15 @@ static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
unsigned Alignment =
CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
- if (!CGF.hasAggregateLLVMType(AllocType))
- CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
- AllocType.isVolatileQualified(), Alignment,
- AllocType);
+ if (!CGF.hasAggregateLLVMType(AllocType))
+ CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType, Alignment),
+ false);
else if (AllocType->isAnyComplexType())
CGF.EmitComplexExprIntoAddr(Init, NewPtr,
AllocType.isVolatileQualified());
else {
AggValueSlot Slot
- = AggValueSlot::forAddr(NewPtr, AllocType.isVolatileQualified(), true);
+ = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(), true);
CGF.EmitAggExpr(Init, Slot);
}
}
@@ -806,7 +793,7 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
RequiresZeroInitialization = true;
}
-
+
CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
E->constructor_arg_begin(),
E->constructor_arg_end(),
@@ -875,7 +862,7 @@ namespace {
getPlacementArgs()[I] = Arg;
}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
const FunctionProtoType *FPT
= OperatorDelete->getType()->getAs<FunctionProtoType>();
assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
@@ -932,7 +919,7 @@ namespace {
getPlacementArgs()[I] = Arg;
}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
const FunctionProtoType *FPT
= OperatorDelete->getType()->getAs<FunctionProtoType>();
assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
@@ -1075,7 +1062,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// CXXNewExpr::shouldNullCheckAllocation()) and we have an
// interesting initializer.
bool nullCheck = allocatorType->isNothrow(getContext()) &&
- !(allocType->isPODType() && !E->hasInitializer());
+ !(allocType.isPODType(getContext()) && !E->hasInitializer());
llvm::BasicBlock *nullCheckBB = 0;
llvm::BasicBlock *contBB = 0;
@@ -1202,7 +1189,7 @@ namespace {
QualType ElementType)
: Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
}
};
@@ -1212,7 +1199,8 @@ namespace {
static void EmitObjectDelete(CodeGenFunction &CGF,
const FunctionDecl *OperatorDelete,
llvm::Value *Ptr,
- QualType ElementType) {
+ QualType ElementType,
+ bool UseGlobalDelete) {
// Find the destructor for the type, if applicable. If the
// destructor is virtual, we'll just emit the vcall and return.
const CXXDestructorDecl *Dtor = 0;
@@ -1222,17 +1210,30 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
Dtor = RD->getDestructor();
if (Dtor->isVirtual()) {
+ if (UseGlobalDelete) {
+ // If we're supposed to call the global delete, make sure we do so
+ // even if the destructor throws.
+ CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
+ Ptr, OperatorDelete,
+ ElementType);
+ }
+
const llvm::Type *Ty =
CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
Dtor_Complete),
/*isVariadic=*/false);
llvm::Value *Callee
- = CGF.BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
+ = CGF.BuildVirtualCall(Dtor,
+ UseGlobalDelete? Dtor_Complete : Dtor_Deleting,
+ Ptr, Ty);
CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
0, 0);
- // The dtor took care of deleting the object.
+ if (UseGlobalDelete) {
+ CGF.PopCleanupBlock();
+ }
+
return;
}
}
@@ -1247,7 +1248,29 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
if (Dtor)
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
/*ForVirtualBase=*/false, Ptr);
+ else if (CGF.getLangOptions().ObjCAutoRefCount &&
+ ElementType->isObjCLifetimeType()) {
+ switch (ElementType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+ case Qualifiers::OCL_Strong: {
+ // Load the pointer value.
+ llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
+ ElementType.isVolatileQualified());
+
+ CGF.EmitARCRelease(PtrValue, /*precise*/ true);
+ break;
+ }
+
+ case Qualifiers::OCL_Weak:
+ CGF.EmitARCDestroyWeak(Ptr);
+ break;
+ }
+ }
+
CGF.PopCleanupBlock();
}
@@ -1268,7 +1291,7 @@ namespace {
: Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
ElementType(ElementType), CookieSize(CookieSize) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
const FunctionProtoType *DeleteFTy =
OperatorDelete->getType()->getAs<FunctionProtoType>();
assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
@@ -1316,31 +1339,40 @@ namespace {
/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *E,
- llvm::Value *Ptr,
- QualType ElementType) {
- llvm::Value *NumElements = 0;
- llvm::Value *AllocatedPtr = 0;
- CharUnits CookieSize;
- CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, E, ElementType,
- NumElements, AllocatedPtr, CookieSize);
+ llvm::Value *deletedPtr,
+ QualType elementType) {
+ llvm::Value *numElements = 0;
+ llvm::Value *allocatedPtr = 0;
+ CharUnits cookieSize;
+ CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
+ numElements, allocatedPtr, cookieSize);
- assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");
+ assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
// Make sure that we call delete even if one of the dtors throws.
- const FunctionDecl *OperatorDelete = E->getOperatorDelete();
+ const FunctionDecl *operatorDelete = E->getOperatorDelete();
CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
- AllocatedPtr, OperatorDelete,
- NumElements, ElementType,
- CookieSize);
-
- if (const CXXRecordDecl *RD = ElementType->getAsCXXRecordDecl()) {
- if (!RD->hasTrivialDestructor()) {
- assert(NumElements && "ReadArrayCookie didn't find element count"
- " for a class with destructor");
- CGF.EmitCXXAggrDestructorCall(RD->getDestructor(), NumElements, Ptr);
- }
+ allocatedPtr, operatorDelete,
+ numElements, elementType,
+ cookieSize);
+
+ // Destroy the elements.
+ if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
+ assert(numElements && "no element count for a type with a destructor!");
+
+ llvm::Value *arrayEnd =
+ CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");
+
+ // Note that it is legal to allocate a zero-length array, and we
+ // can never fold the check away because the length should always
+ // come from a cookie.
+ CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
+ CGF.getDestroyer(dtorKind),
+ /*checkZeroLength*/ true,
+ CGF.needsEHCleanup(dtorKind));
}
+ // Pop the cleanup block.
CGF.PopCleanupBlock();
}
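
The restructured EmitArrayDelete pushes the operator delete cleanup before destroying any elements, so the storage is reclaimed even if an element destructor throws, and the destroy loop must tolerate a count of zero because the count always comes from the array cookie. A rough standalone analogy (hand-written names, not Clang's API):

#include <cstddef>
#include <cstdlib>

template <class T>
void destroyAndFree(T *elements, std::size_t count, void *allocation) {
  // Stands in for the CallArrayDelete cleanup: runs even if a ~T() throws.
  struct FreeGuard {
    void *mem;
    ~FreeGuard() { std::free(mem); }      // operator delete stand-in
  } guard{allocation};

  // count may be zero; the check cannot be folded away.
  for (T *p = elements + count; p != elements;)
    (--p)->~T();
}
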
@@ -1397,7 +1429,8 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
if (E->isArrayForm()) {
EmitArrayDelete(*this, E, Ptr, DeleteTy);
} else {
- EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
+ EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
+ E->isGlobalDelete());
}
EmitBlock(DeleteEnd);
@@ -1415,7 +1448,7 @@ static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
static void EmitBadTypeidCall(CodeGenFunction &CGF) {
llvm::Value *Fn = getBadTypeidFn(CGF);
- CGF.EmitCallOrInvoke(Fn, 0, 0).setDoesNotReturn();
+ CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
CGF.Builder.CreateUnreachable();
}
@@ -1489,11 +1522,11 @@ static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
// const abi::__class_type_info *dst,
// std::ptrdiff_t src2dst_offset);
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- const llvm::Type *PtrDiffTy =
+ llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
- const llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
+ llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
const llvm::FunctionType *FTy =
llvm::FunctionType::get(Int8PtrTy, Args, false);
@@ -1513,7 +1546,7 @@ static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
static void EmitBadCastCall(CodeGenFunction &CGF) {
llvm::Value *Fn = getBadCastFn(CGF);
- CGF.EmitCallOrInvoke(Fn, 0, 0).setDoesNotReturn();
+ CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
CGF.Builder.CreateUnreachable();
}
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index bd19586..35cff1d 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -112,6 +112,10 @@ public:
return Visit(GE->getResultExpr());
}
ComplexPairTy VisitImaginaryLiteral(const ImaginaryLiteral *IL);
+ ComplexPairTy
+ VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE) {
+ return Visit(PE->getReplacement());
+ }
// l-values.
ComplexPairTy VisitDeclRefExpr(const Expr *E) { return EmitLoadOfLValue(E); }
@@ -352,6 +356,8 @@ ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
QualType DestTy) {
switch (CK) {
+ case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
+
case CK_GetObjCProperty: {
LValue LV = CGF.EmitLValue(Op);
assert(LV.isPropertyRef() && "Unknown LValue type!");
@@ -360,39 +366,74 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
case CK_NoOp:
case CK_LValueToRValue:
+ case CK_UserDefinedConversion:
return Visit(Op);
- // TODO: do all of these
- default:
- break;
- }
-
- // Two cases here: cast from (complex to complex) and (scalar to complex).
- if (Op->getType()->isAnyComplexType())
- return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
-
- // FIXME: We should be looking at all of the cast kinds here, not
- // cherry-picking the ones we have test cases for.
- if (CK == CK_LValueBitCast) {
+ case CK_LValueBitCast: {
llvm::Value *V = CGF.EmitLValue(Op).getAddress();
V = Builder.CreateBitCast(V,
- CGF.ConvertType(CGF.getContext().getPointerType(DestTy)));
+ CGF.ConvertType(CGF.getContext().getPointerType(DestTy)));
// FIXME: Are the qualifiers correct here?
return EmitLoadOfComplex(V, DestTy.isVolatileQualified());
}
-
- // C99 6.3.1.7: When a value of real type is converted to a complex type, the
- // real part of the complex result value is determined by the rules of
- // conversion to the corresponding real type and the imaginary part of the
- // complex result value is a positive zero or an unsigned zero.
- llvm::Value *Elt = CGF.EmitScalarExpr(Op);
-
- // Convert the input element to the element type of the complex.
- DestTy = DestTy->getAs<ComplexType>()->getElementType();
- Elt = CGF.EmitScalarConversion(Elt, Op->getType(), DestTy);
-
- // Return (realval, 0).
- return ComplexPairTy(Elt, llvm::Constant::getNullValue(Elt->getType()));
+
+ case CK_BitCast:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_ConstructorConversion:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_AnyPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_ObjCProduceObject:
+ case CK_ObjCConsumeObject:
+ case CK_ObjCReclaimReturnedObject:
+ llvm_unreachable("invalid cast kind for complex value");
+
+ case CK_FloatingRealToComplex:
+ case CK_IntegralRealToComplex: {
+ llvm::Value *Elt = CGF.EmitScalarExpr(Op);
+
+ // Convert the input element to the element type of the complex.
+ DestTy = DestTy->getAs<ComplexType>()->getElementType();
+ Elt = CGF.EmitScalarConversion(Elt, Op->getType(), DestTy);
+
+ // Return (realval, 0).
+ return ComplexPairTy(Elt, llvm::Constant::getNullValue(Elt->getType()));
+ }
+
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
+ }
+
+ llvm_unreachable("unknown cast resulting in complex value");
}
ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
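
For the real-to-complex cases now routed explicitly (CK_FloatingRealToComplex, CK_IntegralRealToComplex), the rule from C99 6.3.1.7 is: convert the scalar by the ordinary real conversion and pair it with a zero imaginary part. A trivial sketch of the rule itself, not of the emitter:

#include <utility>

// (realval, 0): the real part follows the usual real conversion, the
// imaginary part is a positive (or unsigned) zero.
std::pair<double, double> realToComplex(float real) {
  return { static_cast<double>(real), 0.0 };
}
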
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index da37bd5..45e44dd 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -433,9 +433,19 @@ llvm::Constant *ConstStructBuilder::
if (!Builder.Build(ILE))
return 0;
+  // Pick the type to use: if the ConvertType result is layout-identical to
+  // what the builder produced, use it; otherwise use the builder's type.
+ const llvm::StructType *STy =
+ llvm::ConstantStruct::getTypeForElements(CGM.getLLVMContext(),
+                                               Builder.Elements, Builder.Packed);
+ const llvm::Type *ILETy = CGM.getTypes().ConvertType(ILE->getType());
+ if (const llvm::StructType *ILESTy = dyn_cast<llvm::StructType>(ILETy)) {
+ if (ILESTy->isLayoutIdentical(STy))
+ STy = ILESTy;
+ }
+
llvm::Constant *Result =
- llvm::ConstantStruct::get(CGM.getLLVMContext(),
- Builder.Elements, Builder.Packed);
+ llvm::ConstantStruct::get(STy, Builder.Elements);
assert(Builder.NextFieldOffsetInChars.RoundUpToAlignment(
Builder.getAlignment(Result)) ==
@@ -471,6 +481,11 @@ public:
return Visit(PE->getSubExpr());
}
+ llvm::Constant *
+ VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE) {
+ return Visit(PE->getReplacement());
+ }
+
llvm::Constant *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
return Visit(GE->getResultExpr());
}
@@ -523,7 +538,7 @@ public:
// Build a struct with the union sub-element as the first member,
// and padded to the appropriate size
std::vector<llvm::Constant*> Elts;
- std::vector<const llvm::Type*> Types;
+ std::vector<llvm::Type*> Types;
Elts.push_back(C);
Types.push_back(C->getType());
unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
@@ -531,7 +546,7 @@ public:
assert(CurSize <= TotalSize && "Union size mismatch!");
if (unsigned NumPadBytes = TotalSize - CurSize) {
- const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
+ llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
if (NumPadBytes > 1)
Ty = llvm::ArrayType::get(Ty, NumPadBytes);
@@ -570,6 +585,9 @@ public:
case CK_GetObjCProperty:
case CK_ToVoid:
case CK_Dynamic:
+ case CK_ObjCProduceObject:
+ case CK_ObjCConsumeObject:
+ case CK_ObjCReclaimReturnedObject:
return 0;
// These might need to be supported for constexpr.
@@ -650,6 +668,10 @@ public:
return Visit(DAE->getExpr());
}
+ llvm::Constant *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
+ return Visit(E->GetTemporaryExpr());
+ }
+
llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
unsigned NumInitElements = ILE->getNumInits();
if (NumInitElements == 1 && ILE->getType() == ILE->getInit(0)->getType() &&
@@ -694,7 +716,7 @@ public:
if (RewriteType) {
// FIXME: Try to avoid packing the array
- std::vector<const llvm::Type*> Types;
+ std::vector<llvm::Type*> Types;
for (unsigned i = 0; i < Elts.size(); ++i)
Types.push_back(Elts[i]->getType());
const llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
@@ -986,7 +1008,10 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
Result.Val.getComplexIntImag());
// FIXME: the target may want to specify that this is packed.
- return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
+ llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
+ Complex[1]->getType(),
+ NULL);
+ return llvm::ConstantStruct::get(STy, Complex);
}
case APValue::Float:
return llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
@@ -999,7 +1024,10 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
Result.Val.getComplexFloatImag());
// FIXME: the target may want to specify that this is packed.
- return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
+ llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
+ Complex[1]->getType(),
+ NULL);
+ return llvm::ConstantStruct::get(STy, Complex);
}
case APValue::Vector: {
llvm::SmallVector<llvm::Constant *, 4> Inits;
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index dff7bf4..a73e667 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -82,15 +82,15 @@ public:
LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
LValue EmitCheckedLValue(const Expr *E) { return CGF.EmitCheckedLValue(E); }
- Value *EmitLoadOfLValue(LValue LV, QualType T) {
- return CGF.EmitLoadOfLValue(LV, T).getScalarVal();
+ Value *EmitLoadOfLValue(LValue LV) {
+ return CGF.EmitLoadOfLValue(LV).getScalarVal();
}
/// EmitLoadOfLValue - Given an expression with complex type that represents a
/// value l-value, this method emits the address of the l-value, then loads
/// and returns the result.
Value *EmitLoadOfLValue(const Expr *E) {
- return EmitLoadOfLValue(EmitCheckedLValue(E), E->getType());
+ return EmitLoadOfLValue(EmitCheckedLValue(E));
}
/// EmitConversionToBool - Convert the specified expression value to a
@@ -161,6 +161,9 @@ public:
Value *VisitParenExpr(ParenExpr *PE) {
return Visit(PE->getSubExpr());
}
+ Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
+ return Visit(E->getReplacement());
+ }
Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
return Visit(GE->getResultExpr());
}
@@ -197,7 +200,7 @@ public:
Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
if (E->isGLValue())
- return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E), E->getType());
+ return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E));
// Otherwise, assume the mapping is the scalar directly.
return CGF.getOpaqueRValueMapping(E).getScalarVal();
@@ -252,7 +255,7 @@ public:
Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
LValue LV = CGF.EmitObjCIsaExpr(E);
- Value *V = CGF.EmitLoadOfLValue(LV, E->getType()).getScalarVal();
+ Value *V = CGF.EmitLoadOfLValue(LV).getScalarVal();
return V;
}
@@ -269,14 +272,12 @@ public:
Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
return CGF.CGM.EmitNullConstant(E->getType());
}
- Value *VisitCastExpr(CastExpr *E) {
- // Make sure to evaluate VLA bounds now so that we have them for later.
+ Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
if (E->getType()->isVariablyModifiedType())
- CGF.EmitVLASize(E->getType());
-
- return EmitCastExpr(E);
+ CGF.EmitVariablyModifiedType(E->getType());
+ return VisitCastExpr(E);
}
- Value *EmitCastExpr(CastExpr *E);
+ Value *VisitCastExpr(CastExpr *E);
Value *VisitCallExpr(const CallExpr *E) {
if (E->getCallReturnType()->isReferenceType())
@@ -1001,7 +1002,7 @@ static bool ShouldNullCheckClassCastValue(const CastExpr *CE) {
// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
 // have to handle a broader range of conversions than explicit casts, as they
// handle things like function to ptr-to-function decay etc.
-Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
+Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
Expr *E = CE->getSubExpr();
QualType DestTy = CE->getType();
CastKind Kind = CE->getCastKind();
@@ -1020,7 +1021,7 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
Value *V = EmitLValue(E).getAddress();
V = Builder.CreateBitCast(V,
ConvertType(CGF.getContext().getPointerType(DestTy)));
- return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy), DestTy);
+ return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy));
}
case CK_AnyPointerToObjCPointerCast:
@@ -1106,7 +1107,17 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
// function pointers on Itanium and ARM).
return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
}
-
+
+ case CK_ObjCProduceObject:
+ return CGF.EmitARCRetainScalarExpr(E);
+ case CK_ObjCConsumeObject:
+ return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
+ case CK_ObjCReclaimReturnedObject: {
+ llvm::Value *value = Visit(E);
+ value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
+ return CGF.EmitObjCConsumeObject(E->getType(), value);
+ }
+
case CK_FloatingRealToComplex:
case CK_FloatingComplexCast:
case CK_IntegralRealToComplex:
@@ -1122,7 +1133,7 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
assert(E->isGLValue() && E->getObjectKind() == OK_ObjCProperty &&
"CK_GetObjCProperty for non-lvalue or non-ObjCProperty");
- RValue RV = CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getType());
+ RValue RV = CGF.EmitLoadOfLValue(CGF.EmitLValue(E));
return RV.getScalarVal();
}
@@ -1143,15 +1154,10 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
return Builder.CreateIntToPtr(IntResult, ConvertType(DestTy));
}
- case CK_PointerToIntegral: {
- Value *Src = Visit(const_cast<Expr*>(E));
-
- // Handle conversion to bool correctly.
- if (DestTy->isBooleanType())
- return EmitScalarConversion(Src, E->getType(), DestTy);
+ case CK_PointerToIntegral:
+ assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
+ return Builder.CreatePtrToInt(Visit(E), ConvertType(DestTy));
- return Builder.CreatePtrToInt(Src, ConvertType(DestTy));
- }
case CK_ToVoid: {
CGF.EmitIgnoredExpr(E);
return 0;
@@ -1221,7 +1227,7 @@ Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
LValue LV = CGF.EmitBlockDeclRefLValue(E);
- return CGF.EmitLoadOfLValue(LV, E->getType()).getScalarVal();
+ return CGF.EmitLoadOfLValue(LV).getScalarVal();
}
//===----------------------------------------------------------------------===//
@@ -1258,7 +1264,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre) {
QualType type = E->getSubExpr()->getType();
- llvm::Value *value = EmitLoadOfLValue(LV, type);
+ llvm::Value *value = EmitLoadOfLValue(LV);
llvm::Value *input = value;
int amount = (isInc ? 1 : -1);
@@ -1282,7 +1288,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// overflow because of promotion rules; we're just eliding a few steps here.
if (type->isSignedIntegerOrEnumerationType() &&
value->getType()->getPrimitiveSizeInBits() >=
- CGF.CGM.IntTy->getBitWidth())
+ CGF.IntTy->getBitWidth())
value = EmitAddConsiderOverflowBehavior(E, value, amt, isInc);
else
value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
@@ -1292,16 +1298,14 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
QualType type = ptr->getPointeeType();
// VLA types don't have constant size.
- if (type->isVariableArrayType()) {
- llvm::Value *vlaSize =
- CGF.GetVLASize(CGF.getContext().getAsVariableArrayType(type));
- value = CGF.EmitCastToVoidPtr(value);
- if (!isInc) vlaSize = Builder.CreateNSWNeg(vlaSize, "vla.negsize");
+ if (const VariableArrayType *vla
+ = CGF.getContext().getAsVariableArrayType(type)) {
+ llvm::Value *numElts = CGF.getVLASize(vla).first;
+ if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
- value = Builder.CreateGEP(value, vlaSize, "vla.inc");
+ value = Builder.CreateGEP(value, numElts, "vla.inc");
else
- value = Builder.CreateInBoundsGEP(value, vlaSize, "vla.inc");
- value = Builder.CreateBitCast(value, input->getType());
+ value = Builder.CreateInBoundsGEP(value, numElts, "vla.inc");
// Arithmetic on function pointers (!) is just +-1.
} else if (type->isFunctionType()) {
@@ -1374,9 +1378,9 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// Store the updated result through the lvalue.
if (LV.isBitField())
- CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, type, &value);
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
else
- CGF.EmitStoreThroughLValue(RValue::get(value), LV, type);
+ CGF.EmitStoreThroughLValue(RValue::get(value), LV);
// If this is a postinc, return the value read from memory, otherwise use the
// updated value.
@@ -1521,14 +1525,25 @@ ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
CGF.getContext().getAsVariableArrayType(TypeToSize)) {
if (E->isArgumentType()) {
// sizeof(type) - make sure to emit the VLA size.
- CGF.EmitVLASize(TypeToSize);
+ CGF.EmitVariablyModifiedType(TypeToSize);
} else {
// C99 6.5.3.4p2: If the argument is an expression of type
// VLA, it is evaluated.
CGF.EmitIgnoredExpr(E->getArgumentExpr());
}
- return CGF.GetVLASize(VAT);
+ QualType eltType;
+ llvm::Value *numElts;
+ llvm::tie(numElts, eltType) = CGF.getVLASize(VAT);
+
+ llvm::Value *size = numElts;
+
+ // Scale the number of non-VLA elements by the non-VLA element size.
+ CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
+ if (!eltSize.isOne())
+ size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), numElts);
+
+ return size;
}
}
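
The sizeof-of-a-VLA path above now takes a (count, element type) pair from getVLASize and scales the count by the element size only when that size is not one byte. The arithmetic it emits, as a trivial standalone sketch with illustrative names:

#include <cstdint>

// sizeof(vla) = element count * element size; the multiply is skipped when
// the element size is a single byte.
std::uint64_t vlaSizeInBytes(std::uint64_t numElements, std::uint64_t elementSize) {
  return elementSize == 1 ? numElements : numElements * elementSize;
}
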
@@ -1546,8 +1561,7 @@ Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
// Note that we have to ask E because Op might be an l-value that
// this won't work for, e.g. an Obj-C property.
if (E->isGLValue())
- return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getType())
- .getScalarVal();
+ return CGF.EmitLoadOfLValue(CGF.EmitLValue(E)).getScalarVal();
// Otherwise, calculate and project.
return CGF.EmitComplexExpr(Op, false, true).first;
@@ -1563,8 +1577,7 @@ Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
// Note that we have to ask E because Op might be an l-value that
// this won't work for, e.g. an Obj-C property.
if (Op->isGLValue())
- return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getType())
- .getScalarVal();
+ return CGF.EmitLoadOfLValue(CGF.EmitLValue(E)).getScalarVal();
// Otherwise, calculate and project.
return CGF.EmitComplexExpr(Op, true, false).second;
@@ -1616,7 +1629,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
OpInfo.E = E;
// Load/convert the LHS.
LValue LHSLV = EmitCheckedLValue(E->getLHS());
- OpInfo.LHS = EmitLoadOfLValue(LHSLV, LHSTy);
+ OpInfo.LHS = EmitLoadOfLValue(LHSLV);
OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
E->getComputationLHSType());
@@ -1631,10 +1644,9 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
// 'An assignment expression has the value of the left operand after the
// assignment...'.
if (LHSLV.isBitField())
- CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy,
- &Result);
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
else
- CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV, LHSTy);
+ CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
return LHSLV;
}
@@ -1662,15 +1674,17 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
return RHS;
// Otherwise, reload the value.
- return EmitLoadOfLValue(LHS, E->getType());
+ return EmitLoadOfLValue(LHS);
}
void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
const BinOpInfo &Ops,
llvm::Value *Zero, bool isDiv) {
- llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
+ llvm::Function::iterator insertPt = Builder.GetInsertBlock();
llvm::BasicBlock *contBB =
- CGF.createBasicBlock(isDiv ? "div.cont" : "rem.cont", CGF.CurFn);
+ CGF.createBasicBlock(isDiv ? "div.cont" : "rem.cont", CGF.CurFn,
+ llvm::next(insertPt));
+ llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
const llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
@@ -1700,9 +1714,11 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
if (Ops.Ty->isIntegerType())
EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
else if (Ops.Ty->isRealFloatingType()) {
+ llvm::Function::iterator insertPt = Builder.GetInsertBlock();
+ llvm::BasicBlock *DivCont = CGF.createBasicBlock("div.cont", CGF.CurFn,
+ llvm::next(insertPt));
llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow",
CGF.CurFn);
- llvm::BasicBlock *DivCont = CGF.createBasicBlock("div.cont", CGF.CurFn);
CGF.Builder.CreateCondBr(Builder.CreateFCmpOEQ(Ops.RHS, Zero),
overflowBB, DivCont);
EmitOverflowBB(overflowBB);
@@ -1759,9 +1775,9 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
OpID <<= 1;
OpID |= 1;
- const llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
+ llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
- llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, &opTy, 1);
+ llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
Value *resultAndOverflow = Builder.CreateCall2(intrinsic, Ops.LHS, Ops.RHS);
Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
@@ -1769,8 +1785,10 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
// Branch in case of overflow.
llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
+ llvm::Function::iterator insertPt = initialBB;
+ llvm::BasicBlock *continueBB = CGF.createBasicBlock("nooverflow", CGF.CurFn,
+ llvm::next(insertPt));
llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
- llvm::BasicBlock *continueBB = CGF.createBasicBlock("nooverflow", CGF.CurFn);
Builder.CreateCondBr(overflow, overflowBB, continueBB);
@@ -1788,8 +1806,8 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
Builder.SetInsertPoint(overflowBB);
// Get the overflow handler.
- const llvm::Type *Int8Ty = llvm::Type::getInt8Ty(VMContext);
- const llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
+ llvm::Type *Int8Ty = llvm::Type::getInt8Ty(VMContext);
+ llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
llvm::FunctionType *handlerTy =
llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
llvm::Value *handler = CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
@@ -1817,196 +1835,187 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
return phi;
}
-Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
- if (!Ops.Ty->isAnyPointerType()) {
- if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
- switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
- case LangOptions::SOB_Undefined:
- return Builder.CreateNSWAdd(Ops.LHS, Ops.RHS, "add");
- case LangOptions::SOB_Defined:
- return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
- case LangOptions::SOB_Trapping:
- return EmitOverflowCheckedBinOp(Ops);
- }
- }
-
- if (Ops.LHS->getType()->isFPOrFPVectorTy())
- return Builder.CreateFAdd(Ops.LHS, Ops.RHS, "add");
-
- return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
- }
-
- // Must have binary (not unary) expr here. Unary pointer decrement doesn't
- // use this path.
- const BinaryOperator *BinOp = cast<BinaryOperator>(Ops.E);
-
- if (Ops.Ty->isPointerType() &&
- Ops.Ty->getAs<PointerType>()->isVariableArrayType()) {
- // The amount of the addition needs to account for the VLA size
- CGF.ErrorUnsupported(BinOp, "VLA pointer addition");
- }
+/// Emit pointer + index arithmetic.
+static Value *emitPointerArithmetic(CodeGenFunction &CGF,
+ const BinOpInfo &op,
+ bool isSubtraction) {
+ // Must have binary (not unary) expr here. Unary pointer
+ // increment/decrement doesn't use this path.
+ const BinaryOperator *expr = cast<BinaryOperator>(op.E);
- Value *Ptr, *Idx;
- Expr *IdxExp;
- const PointerType *PT = BinOp->getLHS()->getType()->getAs<PointerType>();
- const ObjCObjectPointerType *OPT =
- BinOp->getLHS()->getType()->getAs<ObjCObjectPointerType>();
- if (PT || OPT) {
- Ptr = Ops.LHS;
- Idx = Ops.RHS;
- IdxExp = BinOp->getRHS();
- } else { // int + pointer
- PT = BinOp->getRHS()->getType()->getAs<PointerType>();
- OPT = BinOp->getRHS()->getType()->getAs<ObjCObjectPointerType>();
- assert((PT || OPT) && "Invalid add expr");
- Ptr = Ops.RHS;
- Idx = Ops.LHS;
- IdxExp = BinOp->getLHS();
- }
-
- unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
- if (Width < CGF.PointerWidthInBits) {
- // Zero or sign extend the pointer value based on whether the index is
- // signed or not.
- const llvm::Type *IdxType = CGF.IntPtrTy;
- if (IdxExp->getType()->isSignedIntegerOrEnumerationType())
- Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
- else
- Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
- }
- const QualType ElementType = PT ? PT->getPointeeType() : OPT->getPointeeType();
- // Handle interface types, which are not represented with a concrete type.
- if (const ObjCObjectType *OIT = ElementType->getAs<ObjCObjectType>()) {
- llvm::Value *InterfaceSize =
- llvm::ConstantInt::get(Idx->getType(),
- CGF.getContext().getTypeSizeInChars(OIT).getQuantity());
- Idx = Builder.CreateMul(Idx, InterfaceSize);
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
- Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
- return Builder.CreateBitCast(Res, Ptr->getType());
+ Value *pointer = op.LHS;
+ Expr *pointerOperand = expr->getLHS();
+ Value *index = op.RHS;
+ Expr *indexOperand = expr->getRHS();
+
+ // In a subtraction, the LHS is always the pointer.
+ if (!isSubtraction && !pointer->getType()->isPointerTy()) {
+ std::swap(pointer, index);
+ std::swap(pointerOperand, indexOperand);
+ }
+
+ unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
+ if (width != CGF.PointerWidthInBits) {
+    // Truncate or extend the index to the pointer-difference type,
+    // using a sign extension if the index type is signed.
+ bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
+ index = CGF.Builder.CreateIntCast(index, CGF.PtrDiffTy, isSigned,
+ "idx.ext");
+ }
+
+ // If this is subtraction, negate the index.
+ if (isSubtraction)
+ index = CGF.Builder.CreateNeg(index, "idx.neg");
+
+ const PointerType *pointerType
+ = pointerOperand->getType()->getAs<PointerType>();
+ if (!pointerType) {
+ QualType objectType = pointerOperand->getType()
+ ->castAs<ObjCObjectPointerType>()
+ ->getPointeeType();
+ llvm::Value *objectSize
+ = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
+
+ index = CGF.Builder.CreateMul(index, objectSize);
+
+ Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
+ result = CGF.Builder.CreateGEP(result, index, "add.ptr");
+ return CGF.Builder.CreateBitCast(result, pointer->getType());
+ }
+
+ QualType elementType = pointerType->getPointeeType();
+ if (const VariableArrayType *vla
+ = CGF.getContext().getAsVariableArrayType(elementType)) {
+ // The element count here is the total number of non-VLA elements.
+ llvm::Value *numElements = CGF.getVLASize(vla).first;
+
+ // Effectively, the multiply by the VLA size is part of the GEP.
+ // GEP indexes are signed, and scaling an index isn't permitted to
+ // signed-overflow, so we use the same semantics for our explicit
+ // multiply. We suppress this if overflow is not undefined behavior.
+ if (CGF.getLangOptions().isSignedOverflowDefined()) {
+ index = CGF.Builder.CreateMul(index, numElements, "vla.index");
+ pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
+ } else {
+ index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
+ pointer = CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
+ }
+ return pointer;
}
// Explicitly handle GNU void* and function pointer arithmetic extensions. The
// GNU void* casts amount to no-ops since our void* type is i8*, but this is
// future proof.
- if (ElementType->isVoidType() || ElementType->isFunctionType()) {
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
- Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
- return Builder.CreateBitCast(Res, Ptr->getType());
+ if (elementType->isVoidType() || elementType->isFunctionType()) {
+ Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
+ result = CGF.Builder.CreateGEP(result, index, "add.ptr");
+ return CGF.Builder.CreateBitCast(result, pointer->getType());
}
- if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
- return Builder.CreateGEP(Ptr, Idx, "add.ptr");
- return Builder.CreateInBoundsGEP(Ptr, Idx, "add.ptr");
+ if (CGF.getLangOptions().isSignedOverflowDefined())
+ return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
+
+ return CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
}
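[Note: an illustrative sketch, not literal compiler output. For `p + i` with `int *p` and a signed 32-bit `i` on a 64-bit target, the common path of the new helper extends the index and emits an inbounds GEP:]

    ; names and types are illustrative
    %idx.ext = sext i32 %i to i64
    %add.ptr = getelementptr inbounds i32* %p, i64 %idx.ext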
-Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
- if (!isa<llvm::PointerType>(Ops.LHS->getType())) {
- if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
+Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
+ if (op.LHS->getType()->isPointerTy() ||
+ op.RHS->getType()->isPointerTy())
+ return emitPointerArithmetic(CGF, op, /*subtraction*/ false);
+
+ if (op.Ty->isSignedIntegerOrEnumerationType()) {
+ switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
+ case LangOptions::SOB_Defined:
+ return Builder.CreateAdd(op.LHS, op.RHS, "add");
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(op);
+ }
+ }
+
+ if (op.LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFAdd(op.LHS, op.RHS, "add");
+
+ return Builder.CreateAdd(op.LHS, op.RHS, "add");
+}
+
+Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
+ // The LHS is always a pointer if either side is.
+ if (!op.LHS->getType()->isPointerTy()) {
+ if (op.Ty->isSignedIntegerOrEnumerationType()) {
switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
case LangOptions::SOB_Undefined:
- return Builder.CreateNSWSub(Ops.LHS, Ops.RHS, "sub");
+ return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
case LangOptions::SOB_Defined:
- return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
+ return Builder.CreateSub(op.LHS, op.RHS, "sub");
case LangOptions::SOB_Trapping:
- return EmitOverflowCheckedBinOp(Ops);
+ return EmitOverflowCheckedBinOp(op);
}
}
- if (Ops.LHS->getType()->isFPOrFPVectorTy())
- return Builder.CreateFSub(Ops.LHS, Ops.RHS, "sub");
+ if (op.LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFSub(op.LHS, op.RHS, "sub");
- return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
+ return Builder.CreateSub(op.LHS, op.RHS, "sub");
}
- // Must have binary (not unary) expr here. Unary pointer increment doesn't
- // use this path.
- const BinaryOperator *BinOp = cast<BinaryOperator>(Ops.E);
-
- if (BinOp->getLHS()->getType()->isPointerType() &&
- BinOp->getLHS()->getType()->getAs<PointerType>()->isVariableArrayType()) {
- // The amount of the addition needs to account for the VLA size for
- // ptr-int
- // The amount of the division needs to account for the VLA size for
- // ptr-ptr.
- CGF.ErrorUnsupported(BinOp, "VLA pointer subtraction");
- }
-
- const QualType LHSType = BinOp->getLHS()->getType();
- const QualType LHSElementType = LHSType->getPointeeType();
- if (!isa<llvm::PointerType>(Ops.RHS->getType())) {
- // pointer - int
- Value *Idx = Ops.RHS;
- unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
- if (Width < CGF.PointerWidthInBits) {
- // Zero or sign extend the pointer value based on whether the index is
- // signed or not.
- const llvm::Type *IdxType = CGF.IntPtrTy;
- if (BinOp->getRHS()->getType()->isSignedIntegerOrEnumerationType())
- Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
- else
- Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
- }
- Idx = Builder.CreateNeg(Idx, "sub.ptr.neg");
-
- // Handle interface types, which are not represented with a concrete type.
- if (const ObjCObjectType *OIT = LHSElementType->getAs<ObjCObjectType>()) {
- llvm::Value *InterfaceSize =
- llvm::ConstantInt::get(Idx->getType(),
- CGF.getContext().
- getTypeSizeInChars(OIT).getQuantity());
- Idx = Builder.CreateMul(Idx, InterfaceSize);
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
- Value *Res = Builder.CreateGEP(LHSCasted, Idx, "add.ptr");
- return Builder.CreateBitCast(Res, Ops.LHS->getType());
- }
+ // If the RHS is not a pointer, then we have normal pointer
+ // arithmetic.
+ if (!op.RHS->getType()->isPointerTy())
+ return emitPointerArithmetic(CGF, op, /*subtraction*/ true);
- // Explicitly handle GNU void* and function pointer arithmetic
- // extensions. The GNU void* casts amount to no-ops since our void* type is
- // i8*, but this is future proof.
- if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
- Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr");
- return Builder.CreateBitCast(Res, Ops.LHS->getType());
- }
+ // Otherwise, this is a pointer subtraction.
- if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
- return Builder.CreateGEP(Ops.LHS, Idx, "sub.ptr");
- return Builder.CreateInBoundsGEP(Ops.LHS, Idx, "sub.ptr");
- }
-
- // pointer - pointer
- Value *LHS = Ops.LHS;
- Value *RHS = Ops.RHS;
+ // Do the raw subtraction part.
+ llvm::Value *LHS
+ = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
+ llvm::Value *RHS
+ = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
+ Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
- CharUnits ElementSize;
+ // Okay, figure out the element size.
+ const BinaryOperator *expr = cast<BinaryOperator>(op.E);
+ QualType elementType = expr->getLHS()->getType()->getPointeeType();
- // Handle GCC extension for pointer arithmetic on void* and function pointer
- // types.
- if (LHSElementType->isVoidType() || LHSElementType->isFunctionType())
- ElementSize = CharUnits::One();
- else
- ElementSize = CGF.getContext().getTypeSizeInChars(LHSElementType);
+ llvm::Value *divisor = 0;
- const llvm::Type *ResultType = ConvertType(Ops.Ty);
- LHS = Builder.CreatePtrToInt(LHS, ResultType, "sub.ptr.lhs.cast");
- RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
- Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
+ // For a variable-length array, this is going to be non-constant.
+ if (const VariableArrayType *vla
+ = CGF.getContext().getAsVariableArrayType(elementType)) {
+ llvm::Value *numElements;
+ llvm::tie(numElements, elementType) = CGF.getVLASize(vla);
- // Optimize out the shift for element size of 1.
- if (ElementSize.isOne())
- return BytesBetween;
+ divisor = numElements;
+ // Scale the number of non-VLA elements by the non-VLA element size.
+ CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
+ if (!eltSize.isOne())
+ divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
+
+  // For everything else, we can just compute it, safe in the
+ // assumption that Sema won't let anything through that we can't
+ // safely compute the size of.
+ } else {
+ CharUnits elementSize;
+ // Handle GCC extension for pointer arithmetic on void* and
+ // function pointer types.
+ if (elementType->isVoidType() || elementType->isFunctionType())
+ elementSize = CharUnits::One();
+ else
+ elementSize = CGF.getContext().getTypeSizeInChars(elementType);
+
+ // Don't even emit the divide for element size of 1.
+ if (elementSize.isOne())
+ return diffInChars;
+
+ divisor = CGF.CGM.getSize(elementSize);
+ }
+
// Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
// pointer difference in C is only defined in the case where both operands
// are pointing to elements of an array.
- Value *BytesPerElt =
- llvm::ConstantInt::get(ResultType, ElementSize.getQuantity());
- return Builder.CreateExactSDiv(BytesBetween, BytesPerElt, "sub.ptr.div");
+ return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
}
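[Note: a sketch of the non-VLA pointer-difference path, for illustration only; assumes `int*` operands and a 64-bit ptrdiff_t:]

    %sub.ptr.lhs.cast = ptrtoint i32* %lhs to i64
    %sub.ptr.rhs.cast = ptrtoint i32* %rhs to i64
    %sub.ptr.sub = sub i64 %sub.ptr.lhs.cast, %sub.ptr.rhs.cast
    %sub.ptr.div = sdiv exact i64 %sub.ptr.sub, 4   ; divide by sizeof(int)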
Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
@@ -2228,20 +2237,41 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
bool Ignore = TestAndClearIgnoreResultAssign();
- // __block variables need to have the rhs evaluated first, plus this should
- // improve codegen just a little.
- Value *RHS = Visit(E->getRHS());
- LValue LHS = EmitCheckedLValue(E->getLHS());
-
- // Store the value into the LHS. Bit-fields are handled specially
- // because the result is altered by the store, i.e., [C99 6.5.16p1]
- // 'An assignment expression has the value of the left operand after
- // the assignment...'.
- if (LHS.isBitField())
- CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(),
- &RHS);
- else
- CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS, E->getType());
+ Value *RHS;
+ LValue LHS;
+
+ switch (E->getLHS()->getType().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ llvm::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
+ break;
+
+ case Qualifiers::OCL_Autoreleasing:
+ llvm::tie(LHS,RHS) = CGF.EmitARCStoreAutoreleasing(E);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ RHS = Visit(E->getRHS());
+ LHS = EmitCheckedLValue(E->getLHS());
+ RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
+ break;
+
+ // No reason to do any of these differently.
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ // __block variables need to have the rhs evaluated first, plus
+ // this should improve codegen just a little.
+ RHS = Visit(E->getRHS());
+ LHS = EmitCheckedLValue(E->getLHS());
+
+ // Store the value into the LHS. Bit-fields are handled specially
+ // because the result is altered by the store, i.e., [C99 6.5.16p1]
+ // 'An assignment expression has the value of the left operand after
+ // the assignment...'.
+ if (LHS.isBitField())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
+ else
+ CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
+ }
// If the result is clearly ignored, return now.
if (Ignore)
@@ -2260,7 +2290,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
return RHS;
// Otherwise, reload the value.
- return EmitLoadOfLValue(LHS, E->getType());
+ return EmitLoadOfLValue(LHS);
}
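[Note: an illustrative sketch under the assumption that the __weak variable lives in an alloca %w. An assignment `w = x;` to a __weak lvalue now routes through the weak entry point instead of a plain store:]

    ; illustrative lowering of:  __weak id w; ... w = x;
    %stored = call i8* @objc_storeWeak(i8** %w, i8* %x)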
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
@@ -2548,7 +2578,7 @@ Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
- const llvm::Type * DstTy = ConvertType(E->getDstType());
+ const llvm::Type *DstTy = ConvertType(E->getType());
// Going from vec4->vec3 or vec3->vec4 is a special case and requires
// a shuffle vector instead of a bitcast.
@@ -2656,7 +2686,7 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
llvm::Value *Src = EmitScalarExpr(BaseExpr);
Builder.CreateStore(Src, V);
V = ScalarExprEmitter(*this).EmitLoadOfLValue(
- MakeAddrLValue(V, E->getType()), E->getType());
+ MakeAddrLValue(V, E->getType()));
} else {
if (E->isArrow())
V = ScalarExprEmitter(*this).EmitLoadOfLValue(BaseExpr);
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index fa42cd1..426cca0 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -15,15 +15,29 @@
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Diagnostic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/InlineAsm.h"
using namespace clang;
using namespace CodeGen;
+typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
+static TryEmitResult
+tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
+
+/// Given the address of a variable of pointer type, find the correct
+/// null to store into it.
+static llvm::Constant *getNullForVariable(llvm::Value *addr) {
+ const llvm::Type *type =
+ cast<llvm::PointerType>(addr->getType())->getElementType();
+ return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
+}
+
/// Emits an instance of NSConstantString representing the object.
llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
@@ -55,6 +69,7 @@ static RValue AdjustRelatedResultType(CodeGenFunction &CGF,
RValue Result) {
if (!Method)
return Result;
+
if (!Method->hasRelatedResultType() ||
CGF.getContext().hasSameType(E->getType(), Method->getResultType()) ||
!Result.isScalar())
@@ -71,6 +86,18 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
// implementation vary between runtimes. We can get the receiver and
// arguments in generic code.
+ bool isDelegateInit = E->isDelegateInitCall();
+
+ // We don't retain the receiver in delegate init calls, and this is
+ // safe because the receiver value is always loaded from 'self',
+ // which we zero out. We don't want to Block_copy block receivers,
+ // though.
+ bool retainSelf =
+ (!isDelegateInit &&
+ CGM.getLangOptions().ObjCAutoRefCount &&
+ E->getMethodDecl() &&
+ E->getMethodDecl()->hasAttr<NSConsumesSelfAttr>());
+
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
bool isSuperMessage = false;
bool isClassMessage = false;
@@ -80,8 +107,15 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
llvm::Value *Receiver = 0;
switch (E->getReceiverKind()) {
case ObjCMessageExpr::Instance:
- Receiver = EmitScalarExpr(E->getInstanceReceiver());
ReceiverType = E->getInstanceReceiver()->getType();
+ if (retainSelf) {
+ TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
+ E->getInstanceReceiver());
+ Receiver = ter.getPointer();
+ if (!ter.getInt())
+ Receiver = EmitARCRetainNonBlock(Receiver);
+ } else
+ Receiver = EmitScalarExpr(E->getInstanceReceiver());
break;
case ObjCMessageExpr::Class: {
@@ -92,6 +126,9 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
assert(OID && "Invalid Objective-C class message send");
Receiver = Runtime.GetClass(Builder, OID);
isClassMessage = true;
+
+ if (retainSelf)
+ Receiver = EmitARCRetainNonBlock(Receiver);
break;
}
@@ -99,6 +136,9 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
ReceiverType = E->getSuperType();
Receiver = LoadObjCSelf();
isSuperMessage = true;
+
+ if (retainSelf)
+ Receiver = EmitARCRetainNonBlock(Receiver);
break;
case ObjCMessageExpr::SuperClass:
@@ -106,14 +146,36 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
Receiver = LoadObjCSelf();
isSuperMessage = true;
isClassMessage = true;
+
+ if (retainSelf)
+ Receiver = EmitARCRetainNonBlock(Receiver);
break;
}
+ QualType ResultType =
+ E->getMethodDecl() ? E->getMethodDecl()->getResultType() : E->getType();
+
CallArgList Args;
EmitCallArgs(Args, E->getMethodDecl(), E->arg_begin(), E->arg_end());
- QualType ResultType =
- E->getMethodDecl() ? E->getMethodDecl()->getResultType() : E->getType();
+ // For delegate init calls in ARC, do an unsafe store of null into
+ // self. This represents the call taking direct ownership of that
+ // value. We have to do this after emitting the other call
+ // arguments because they might also reference self, but we don't
+ // have to worry about any of them modifying self because that would
+ // be an undefined read and write of an object in unordered
+ // expressions.
+ if (isDelegateInit) {
+ assert(getLangOptions().ObjCAutoRefCount &&
+ "delegate init calls should only be marked in ARC");
+
+ // Do an unsafe store of null into self.
+ llvm::Value *selfAddr =
+ LocalDeclMap[cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()];
+ assert(selfAddr && "no self entry for a delegate init call?");
+
+ Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
+ }
RValue result;
if (isSuperMessage) {
@@ -134,10 +196,54 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
Receiver, Args, OID,
E->getMethodDecl());
}
-
+
+ // For delegate init calls in ARC, implicitly store the result of
+ // the call back into self. This takes ownership of the value.
+ if (isDelegateInit) {
+ llvm::Value *selfAddr =
+ LocalDeclMap[cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()];
+ llvm::Value *newSelf = result.getScalarVal();
+
+ // The delegate return type isn't necessarily a matching type; in
+ // fact, it's quite likely to be 'id'.
+ const llvm::Type *selfTy =
+ cast<llvm::PointerType>(selfAddr->getType())->getElementType();
+ newSelf = Builder.CreateBitCast(newSelf, selfTy);
+
+ Builder.CreateStore(newSelf, selfAddr);
+ }
+
return AdjustRelatedResultType(*this, E, E->getMethodDecl(), result);
}
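[Note: the net effect for a delegate init call such as `self = [super init]` under ARC is roughly the following. This is an illustrative sketch; @init.send stands in for the real super message send, which is elided here:]

    %old.self = load i8** %self.addr             ; receiver loaded from 'self' first
    store i8* null, i8** %self.addr              ; unsafe store of null: the call owns self now
    %result = call i8* @init.send(i8* %old.self)
    store i8* %result, i8** %self.addr           ; result stored back, taking ownership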
+namespace {
+struct FinishARCDealloc : EHScopeStack::Cleanup {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);
+
+ const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext());
+ const ObjCInterfaceDecl *iface = impl->getClassInterface();
+ if (!iface->getSuperClass()) return;
+
+ bool isCategory = isa<ObjCCategoryImplDecl>(impl);
+
+ // Call [super dealloc] if we have a superclass.
+ llvm::Value *self = CGF.LoadObjCSelf();
+
+ CallArgList args;
+ CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(),
+ CGF.getContext().VoidTy,
+ method->getSelector(),
+ iface,
+ isCategory,
+ self,
+ /*is class msg*/ false,
+ args,
+ method);
+ }
+};
+}
+
/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
/// the LLVM function and sets the other context used by
/// CodeGenFunction.
@@ -164,8 +270,21 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
CurGD = OMD;
StartFunction(OMD, OMD->getResultType(), Fn, FI, args, StartLoc);
+
+ // In ARC, certain methods get an extra cleanup.
+ if (CGM.getLangOptions().ObjCAutoRefCount &&
+ OMD->isInstanceMethod() &&
+ OMD->getSelector().isUnarySelector()) {
+ const IdentifierInfo *ident =
+ OMD->getSelector().getIdentifierInfoForSlot(0);
+ if (ident->isStr("dealloc"))
+ EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind());
+ }
}
+static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ LValue lvalue, QualType type);
+
void CodeGenFunction::GenerateObjCGetterBody(ObjCIvarDecl *Ivar,
bool IsAtomic, bool IsStrong) {
LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
@@ -269,6 +388,9 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
Types.ConvertType(PD->getType())));
EmitReturnOfRValue(RV, PD->getType());
+
+ // objc_getProperty does an autorelease, so we should suppress ours.
+ AutoreleaseResult = false;
} else {
const llvm::Triple &Triple = getContext().Target.getTriple();
QualType IVART = Ivar->getType();
@@ -346,18 +468,24 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
}
else {
LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
- Ivar, 0);
- if (PD->getType()->isReferenceType()) {
- RValue RV = RValue::get(LV.getAddress());
- EmitReturnOfRValue(RV, PD->getType());
- }
- else {
- CodeGenTypes &Types = CGM.getTypes();
- RValue RV = EmitLoadOfLValue(LV, IVART);
- RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
- Types.ConvertType(PD->getType())));
- EmitReturnOfRValue(RV, PD->getType());
+ Ivar, 0);
+ QualType propType = PD->getType();
+
+ llvm::Value *value;
+ if (propType->isReferenceType()) {
+ value = LV.getAddress();
+ } else {
+ // In ARC, we want to emit this retained.
+ if (getLangOptions().ObjCAutoRefCount &&
+ PD->getType()->isObjCRetainableType())
+ value = emitARCRetainLoadOfScalar(*this, LV, IVART);
+ else
+ value = EmitLoadOfLValue(LV).getScalarVal();
+
+ value = Builder.CreateBitCast(value, ConvertType(propType));
}
+
+ EmitReturnOfRValue(RValue::get(value), propType);
}
}
@@ -551,50 +679,35 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
FinishFunction();
}
-// FIXME: these are stolen from CGClass.cpp, which is lame.
namespace {
- struct CallArrayIvarDtor : EHScopeStack::Cleanup {
+ struct DestroyIvar : EHScopeStack::Cleanup {
+ private:
+ llvm::Value *addr;
const ObjCIvarDecl *ivar;
- llvm::Value *self;
- CallArrayIvarDtor(const ObjCIvarDecl *ivar, llvm::Value *self)
- : ivar(ivar), self(self) {}
-
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
- LValue lvalue =
- CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), self, ivar, 0);
-
- QualType type = ivar->getType();
- const ConstantArrayType *arrayType
- = CGF.getContext().getAsConstantArrayType(type);
- QualType baseType = CGF.getContext().getBaseElementType(arrayType);
- const CXXRecordDecl *classDecl = baseType->getAsCXXRecordDecl();
-
- llvm::Value *base
- = CGF.Builder.CreateBitCast(lvalue.getAddress(),
- CGF.ConvertType(baseType)->getPointerTo());
- CGF.EmitCXXAggrDestructorCall(classDecl->getDestructor(),
- arrayType, base);
+ CodeGenFunction::Destroyer &destroyer;
+ bool useEHCleanupForArray;
+ public:
+ DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar,
+ CodeGenFunction::Destroyer *destroyer,
+ bool useEHCleanupForArray)
+ : addr(addr), ivar(ivar), destroyer(*destroyer),
+ useEHCleanupForArray(useEHCleanupForArray) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ LValue lvalue
+ = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
+ CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer,
+ flags.isForNormalCleanup() && useEHCleanupForArray);
}
};
+}
- struct CallIvarDtor : EHScopeStack::Cleanup {
- const ObjCIvarDecl *ivar;
- llvm::Value *self;
- CallIvarDtor(const ObjCIvarDecl *ivar, llvm::Value *self)
- : ivar(ivar), self(self) {}
-
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
- LValue lvalue =
- CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), self, ivar, 0);
-
- QualType type = ivar->getType();
- const CXXRecordDecl *classDecl = type->getAsCXXRecordDecl();
-
- CGF.EmitCXXDestructorCall(classDecl->getDestructor(),
- Dtor_Complete, /*ForVirtualBase=*/false,
- lvalue.getAddress());
- }
- };
+/// Like CodeGenFunction::destroyARCStrong, but do it with a call.
+static void destroyARCStrongWithStore(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ QualType type) {
+ llvm::Value *null = getNullForVariable(addr);
+ CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
}
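[Note: concretely, for a __strong ivar this cleanup boils down to a single runtime call; illustrative only, with %ivar.addr standing for the ivar's address cast to i8**:]

    call void @objc_storeStrong(i8** %ivar.addr, i8* null)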
static void emitCXXDestructMethod(CodeGenFunction &CGF,
@@ -609,29 +722,26 @@ static void emitCXXDestructMethod(CodeGenFunction &CGF,
ivar; ivar = ivar->getNextIvar()) {
QualType type = ivar->getType();
- // Drill down to the base element type.
- QualType baseType = type;
- const ConstantArrayType *arrayType =
- CGF.getContext().getAsConstantArrayType(baseType);
- if (arrayType) baseType = CGF.getContext().getBaseElementType(arrayType);
-
// Check whether the ivar is a destructible type.
- QualType::DestructionKind destructKind = baseType.isDestructedType();
- assert(destructKind == type.isDestructedType());
-
- switch (destructKind) {
- case QualType::DK_none:
- continue;
-
- case QualType::DK_cxx_destructor:
- if (arrayType)
- CGF.EHStack.pushCleanup<CallArrayIvarDtor>(NormalAndEHCleanup,
- ivar, self);
- else
- CGF.EHStack.pushCleanup<CallIvarDtor>(NormalAndEHCleanup,
- ivar, self);
- break;
+ QualType::DestructionKind dtorKind = type.isDestructedType();
+ if (!dtorKind) continue;
+
+ CodeGenFunction::Destroyer *destroyer = 0;
+
+ // Use a call to objc_storeStrong to destroy strong ivars, for the
+ // general benefit of the tools.
+ if (dtorKind == QualType::DK_objc_strong_lifetime) {
+ destroyer = &destroyARCStrongWithStore;
+
+ // Otherwise use the default for the destruction kind.
+ } else {
+ destroyer = &CGF.getDestroyer(dtorKind);
}
+
+ CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind);
+
+ CGF.EHStack.pushCleanup<DestroyIvar>(cleanupKind, self, ivar, destroyer,
+ cleanupKind & EHCleanup);
}
assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?");
@@ -645,6 +755,9 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
// Emit .cxx_construct.
if (ctor) {
+ // Suppress the final autorelease in ARC.
+ AutoreleaseResult = false;
+
llvm::SmallVector<CXXCtorInitializer *, 8> IvarInitializers;
for (ObjCImplementationDecl::init_const_iterator B = IMP->init_begin(),
E = IMP->init_end(); B != E; ++B) {
@@ -747,6 +860,16 @@ RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
llvm::Value *Receiver = LV.getPropertyRefBaseAddr();
+ if (CGM.getLangOptions().ObjCAutoRefCount) {
+ QualType receiverType;
+ if (E->isSuperReceiver())
+ receiverType = E->getSuperReceiverType();
+ else if (E->isClassReceiver())
+ receiverType = getContext().getObjCClassType();
+ else
+ receiverType = E->getBase()->getType();
+ }
+
// Accesses to 'super' follow a different code path.
if (E->isSuperReceiver())
return AdjustRelatedResultType(*this, E, method,
@@ -757,9 +880,9 @@ RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
const ObjCInterfaceDecl *ReceiverClass
= (E->isClassReceiver() ? E->getClassReceiver() : 0);
return AdjustRelatedResultType(*this, E, method,
- CGM.getObjCRuntime().
- GenerateMessageSend(*this, Return, ResultType, S,
- Receiver, CallArgList(), ReceiverClass));
+ CGM.getObjCRuntime().
+ GenerateMessageSend(*this, Return, ResultType, S,
+ Receiver, CallArgList(), ReceiverClass));
}
void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
@@ -808,17 +931,17 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
return;
}
- // The local variable comes into scope immediately.
- AutoVarEmission variable = AutoVarEmission::invalid();
- if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement()))
- variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl()));
-
CGDebugInfo *DI = getDebugInfo();
if (DI) {
DI->setLocation(S.getSourceRange().getBegin());
DI->EmitRegionStart(Builder);
}
+ // The local variable comes into scope immediately.
+ AutoVarEmission variable = AutoVarEmission::invalid();
+ if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement()))
+ variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl()));
+
JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end");
JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next");
@@ -958,6 +1081,9 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
elementLValue = EmitLValue(&tempDRE);
elementType = D->getType();
elementIsVariable = true;
+
+ if (D->isARCPseudoStrong())
+ elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone);
} else {
elementLValue = LValue(); // suppress warning
elementType = cast<Expr>(S.getElement())->getType();
@@ -984,10 +1110,12 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Make sure we have an l-value. Yes, this gets evaluated every
// time through the loop.
- if (!elementIsVariable)
+ if (!elementIsVariable) {
elementLValue = EmitLValue(cast<Expr>(S.getElement()));
-
- EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue, elementType);
+ EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue);
+ } else {
+ EmitScalarInit(CurrentItem, elementLValue);
+ }
// If we do have an element variable, this assignment is the end of
// its initialization.
@@ -1048,7 +1176,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
llvm::Value *null = llvm::Constant::getNullValue(convertedElementType);
elementLValue = EmitLValue(cast<Expr>(S.getElement()));
- EmitStoreThroughLValue(RValue::get(null), elementLValue, elementType);
+ EmitStoreThroughLValue(RValue::get(null), elementLValue);
}
if (DI) {
@@ -1072,4 +1200,957 @@ void CodeGenFunction::EmitObjCAtSynchronizedStmt(
CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S);
}
+/// Produce the code for a CK_ObjCProduceObject. Just does a
+/// primitive retain.
+llvm::Value *CodeGenFunction::EmitObjCProduceObject(QualType type,
+ llvm::Value *value) {
+ return EmitARCRetain(type, value);
+}
+
+namespace {
+ struct CallObjCRelease : EHScopeStack::Cleanup {
+ CallObjCRelease(QualType type, llvm::Value *ptr, llvm::Value *condition)
+ : type(type), ptr(ptr), condition(condition) {}
+ QualType type;
+ llvm::Value *ptr;
+ llvm::Value *condition;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ llvm::Value *object;
+
+ // If we're in a conditional branch, we had to stash away in an
+ // alloca the pointer to be released.
+ llvm::BasicBlock *cont = 0;
+ if (condition) {
+ llvm::BasicBlock *release = CGF.createBasicBlock("release.yes");
+ cont = CGF.createBasicBlock("release.cont");
+
+ llvm::Value *cond = CGF.Builder.CreateLoad(condition);
+ CGF.Builder.CreateCondBr(cond, release, cont);
+ CGF.EmitBlock(release);
+ object = CGF.Builder.CreateLoad(ptr);
+ } else {
+ object = ptr;
+ }
+
+ CGF.EmitARCRelease(object, /*precise*/ true);
+
+ if (cont) CGF.EmitBlock(cont);
+ }
+ };
+}
+
+/// Produce the code for a CK_ObjCConsumeObject. Does a primitive
+/// release at the end of the full-expression.
+llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type,
+ llvm::Value *object) {
+ // If we're in a conditional branch, we need to make the cleanup
+ // conditional. FIXME: this really needs to be supported by the
+ // environment.
+ llvm::AllocaInst *cond;
+ llvm::Value *ptr;
+ if (isInConditionalBranch()) {
+ cond = CreateTempAlloca(Builder.getInt1Ty(), "release.cond");
+ ptr = CreateTempAlloca(object->getType(), "release.value");
+
+ // The alloca is false until we get here.
+ // FIXME: er. doesn't this need to be set at the start of the condition?
+ InitTempAlloca(cond, Builder.getFalse());
+
+ // Then it turns true.
+ Builder.CreateStore(Builder.getTrue(), cond);
+ Builder.CreateStore(object, ptr);
+ } else {
+ cond = 0;
+ ptr = object;
+ }
+
+ EHStack.pushCleanup<CallObjCRelease>(getARCCleanupKind(), type, ptr, cond);
+ return object;
+}
+
+llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
+ llvm::Value *value) {
+ return EmitARCRetainAutorelease(type, value);
+}
+
+
+static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
+ const llvm::FunctionType *type,
+ llvm::StringRef fnName) {
+ llvm::Constant *fn = CGM.CreateRuntimeFunction(type, fnName);
+
+ // In -fobjc-no-arc-runtime, emit weak references to the runtime
+ // support library.
+ if (!CGM.getCodeGenOpts().ObjCRuntimeHasARC)
+ if (llvm::Function *f = dyn_cast<llvm::Function>(fn))
+ f->setLinkage(llvm::Function::ExternalWeakLinkage);
+
+ return fn;
+}
+
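[Note: for example, when the deployment target's runtime is not known to provide the ARC entry points, the lazily created declaration comes out weakly linked, roughly as follows (illustrative):]

    declare extern_weak i8* @objc_retain(i8*)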
+/// Perform an operation having the signature
+/// i8* (i8*)
+/// where a null input causes a no-op and returns null.
+static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
+ llvm::Value *value,
+ llvm::Constant *&fn,
+ llvm::StringRef fnName) {
+ if (isa<llvm::ConstantPointerNull>(value)) return value;
+
+ if (!fn) {
+ std::vector<llvm::Type*> args(1, CGF.Int8PtrTy);
+ const llvm::FunctionType *fnType =
+ llvm::FunctionType::get(CGF.Int8PtrTy, args, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ // Cast the argument to 'id'.
+ const llvm::Type *origType = value->getType();
+ value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
+
+ // Call the function.
+ llvm::CallInst *call = CGF.Builder.CreateCall(fn, value);
+ call->setDoesNotThrow();
+
+ // Cast the result back to the original type.
+ return CGF.Builder.CreateBitCast(call, origType);
+}
+
+/// Perform an operation having the following signature:
+/// i8* (i8**)
+static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ llvm::Constant *&fn,
+ llvm::StringRef fnName) {
+ if (!fn) {
+ std::vector<llvm::Type*> args(1, CGF.Int8PtrPtrTy);
+ const llvm::FunctionType *fnType =
+ llvm::FunctionType::get(CGF.Int8PtrTy, args, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ // Cast the argument to 'id*'.
+ const llvm::Type *origType = addr->getType();
+ addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
+
+ // Call the function.
+ llvm::CallInst *call = CGF.Builder.CreateCall(fn, addr);
+ call->setDoesNotThrow();
+
+ // Cast the result back to a dereference of the original type.
+ llvm::Value *result = call;
+ if (origType != CGF.Int8PtrPtrTy)
+ result = CGF.Builder.CreateBitCast(result,
+ cast<llvm::PointerType>(origType)->getElementType());
+
+ return result;
+}
+
+/// Perform an operation having the following signature:
+/// i8* (i8**, i8*)
+static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ llvm::Value *value,
+ llvm::Constant *&fn,
+ llvm::StringRef fnName,
+ bool ignored) {
+ assert(cast<llvm::PointerType>(addr->getType())->getElementType()
+ == value->getType());
+
+ if (!fn) {
+ std::vector<llvm::Type*> argTypes(2);
+ argTypes[0] = CGF.Int8PtrPtrTy;
+ argTypes[1] = CGF.Int8PtrTy;
+
+ const llvm::FunctionType *fnType
+ = llvm::FunctionType::get(CGF.Int8PtrTy, argTypes, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ const llvm::Type *origType = value->getType();
+
+ addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
+ value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
+
+ llvm::CallInst *result = CGF.Builder.CreateCall2(fn, addr, value);
+ result->setDoesNotThrow();
+
+ if (ignored) return 0;
+
+ return CGF.Builder.CreateBitCast(result, origType);
+}
+
+/// Perform an operation having the following signature:
+/// void (i8**, i8**)
+static void emitARCCopyOperation(CodeGenFunction &CGF,
+ llvm::Value *dst,
+ llvm::Value *src,
+ llvm::Constant *&fn,
+ llvm::StringRef fnName) {
+ assert(dst->getType() == src->getType());
+
+ if (!fn) {
+ std::vector<llvm::Type*> argTypes(2, CGF.Int8PtrPtrTy);
+ const llvm::FunctionType *fnType
+ = llvm::FunctionType::get(CGF.Builder.getVoidTy(), argTypes, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ dst = CGF.Builder.CreateBitCast(dst, CGF.Int8PtrPtrTy);
+ src = CGF.Builder.CreateBitCast(src, CGF.Int8PtrPtrTy);
+
+ llvm::CallInst *result = CGF.Builder.CreateCall2(fn, dst, src);
+ result->setDoesNotThrow();
+}
+
+/// Produce the code to do a retain. Based on the type, calls one of:
+/// call i8* @objc_retain(i8* %value)
+/// call i8* @objc_retainBlock(i8* %value)
+llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
+ if (type->isBlockPointerType())
+ return EmitARCRetainBlock(value);
+ else
+ return EmitARCRetainNonBlock(value);
+}
+
+/// Retain the given object, with normal retain semantics.
+/// call i8* @objc_retain(i8* %value)
+llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retain,
+ "objc_retain");
+}
+
+/// Retain the given block, with _Block_copy semantics.
+/// call i8* @objc_retainBlock(i8* %value)
+llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retainBlock,
+ "objc_retainBlock");
+}
+
+/// Retain the given object which is the result of a function call.
+/// call i8* @objc_retainAutoreleasedReturnValue(i8* %value)
+///
+/// Yes, this function name is one character away from a different
+/// call with completely different semantics.
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
+ // Fetch the void(void) inline asm which marks that we're going to
+ // retain the autoreleased return value.
+ llvm::InlineAsm *&marker
+ = CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker;
+ if (!marker) {
+ llvm::StringRef assembly
+ = CGM.getTargetCodeGenInfo()
+ .getARCRetainAutoreleasedReturnValueMarker();
+
+ // If we have an empty assembly string, there's nothing to do.
+ if (assembly.empty()) {
+
+ // Otherwise, at -O0, build an inline asm that we're going to call
+ // in a moment.
+ } else if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ llvm::FunctionType *type =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
+ /*variadic*/ false);
+
+ marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true);
+
+ // If we're at -O1 and above, we don't want to litter the code
+ // with this marker yet, so leave a breadcrumb for the ARC
+ // optimizer to pick up.
+ } else {
+ llvm::NamedMDNode *metadata =
+ CGM.getModule().getOrInsertNamedMetadata(
+ "clang.arc.retainAutoreleasedReturnValueMarker");
+ assert(metadata->getNumOperands() <= 1);
+ if (metadata->getNumOperands() == 0) {
+ llvm::Value *string = llvm::MDString::get(getLLVMContext(), assembly);
+ llvm::Value *args[] = { string };
+ metadata->addOperand(llvm::MDNode::get(getLLVMContext(), args));
+ }
+ }
+ }
+
+ // Call the marker asm if we made one, which we do only at -O0.
+ if (marker) Builder.CreateCall(marker);
+
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retainAutoreleasedReturnValue,
+ "objc_retainAutoreleasedReturnValue");
+}
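[Note: at -O0 on a target that supplies a marker string, the resulting pattern looks roughly like this. Illustrative only; the asm string and the @get.send callee are placeholders:]

    %call = call i8* @get.send(i8* %receiver)
    call void asm sideeffect "<target-specific marker>", ""()
    %retained = call i8* @objc_retainAutoreleasedReturnValue(i8* %call)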
+
+/// Release the given object.
+/// call void @objc_release(i8* %value)
+void CodeGenFunction::EmitARCRelease(llvm::Value *value, bool precise) {
+ if (isa<llvm::ConstantPointerNull>(value)) return;
+
+ llvm::Constant *&fn = CGM.getARCEntrypoints().objc_release;
+ if (!fn) {
+ std::vector<llvm::Type*> args(1, Int8PtrTy);
+ const llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Builder.getVoidTy(), args, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_release");
+ }
+
+ // Cast the argument to 'id'.
+ value = Builder.CreateBitCast(value, Int8PtrTy);
+
+ // Call objc_release.
+ llvm::CallInst *call = Builder.CreateCall(fn, value);
+ call->setDoesNotThrow();
+
+ if (!precise) {
+ llvm::SmallVector<llvm::Value*,1> args;
+ call->setMetadata("clang.imprecise_release",
+ llvm::MDNode::get(Builder.getContext(), args));
+ }
+}
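[Note: an imprecise release therefore carries an empty metadata node as a hint to the ARC optimizer; a sketch of the emitted IR, with illustrative numbering:]

    call void @objc_release(i8* %value), !clang.imprecise_release !0
    ; where, at module level:
    !0 = metadata !{}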
+
+/// Store into a strong object. Always calls this:
+/// call void @objc_storeStrong(i8** %addr, i8* %value)
+llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr,
+ llvm::Value *value,
+ bool ignored) {
+ assert(cast<llvm::PointerType>(addr->getType())->getElementType()
+ == value->getType());
+
+ llvm::Constant *&fn = CGM.getARCEntrypoints().objc_storeStrong;
+ if (!fn) {
+ llvm::Type *argTypes[] = { Int8PtrPtrTy, Int8PtrTy };
+ const llvm::FunctionType *fnType
+ = llvm::FunctionType::get(Builder.getVoidTy(), argTypes, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_storeStrong");
+ }
+
+ addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
+ llvm::Value *castValue = Builder.CreateBitCast(value, Int8PtrTy);
+
+ Builder.CreateCall2(fn, addr, castValue)->setDoesNotThrow();
+
+ if (ignored) return 0;
+ return value;
+}
+
+/// Store into a strong object. Sometimes calls this:
+/// call void @objc_storeStrong(i8** %addr, i8* %value)
+/// Other times, breaks it down into components.
+llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
+ llvm::Value *newValue,
+ bool ignored) {
+ QualType type = dst.getType();
+ bool isBlock = type->isBlockPointerType();
+
+ // Use a store barrier at -O0 unless this is a block type or the
+ // lvalue is inadequately aligned.
+ if (shouldUseFusedARCCalls() &&
+ !isBlock &&
+ !(dst.getAlignment() && dst.getAlignment() < PointerAlignInBytes)) {
+ return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored);
+ }
+
+ // Otherwise, split it out.
+
+ // Retain the new value.
+ newValue = EmitARCRetain(type, newValue);
+
+ // Read the old value.
+ llvm::Value *oldValue = EmitLoadOfScalar(dst);
+
+ // Store. We do this before the release so that any deallocs won't
+ // see the old value.
+ EmitStoreOfScalar(newValue, dst);
+
+ // Finally, release the old value.
+ EmitARCRelease(oldValue, /*precise*/ false);
+
+ return newValue;
+}
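[Note: when the fused call is not used, the split sequence for assigning a new value into a __strong lvalue at address %addr looks roughly like this (illustrative):]

    %retained = call i8* @objc_retain(i8* %newValue)
    %old = load i8** %addr
    store i8* %retained, i8** %addr
    call void @objc_release(i8* %old)       ; imprecise release; metadata omitted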
+
+/// Autorelease the given object.
+/// call i8* @objc_autorelease(i8* %value)
+llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_autorelease,
+ "objc_autorelease");
+}
+
+/// Autorelease the given object.
+/// call i8* @objc_autoreleaseReturnValue(i8* %value)
+llvm::Value *
+CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_autoreleaseReturnValue,
+ "objc_autoreleaseReturnValue");
+}
+
+/// Do a fused retain/autorelease of the given object.
+/// call i8* @objc_retainAutoreleaseReturnValue(i8* %value)
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retainAutoreleaseReturnValue,
+ "objc_retainAutoreleaseReturnValue");
+}
+
+/// Do a fused retain/autorelease of the given object.
+/// call i8* @objc_retainAutorelease(i8* %value)
+/// or
+/// %retain = call i8* @objc_retainBlock(i8* %value)
+/// call i8* @objc_autorelease(i8* %retain)
+llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
+ llvm::Value *value) {
+ if (!type->isBlockPointerType())
+ return EmitARCRetainAutoreleaseNonBlock(value);
+
+ if (isa<llvm::ConstantPointerNull>(value)) return value;
+
+ const llvm::Type *origType = value->getType();
+ value = Builder.CreateBitCast(value, Int8PtrTy);
+ value = EmitARCRetainBlock(value);
+ value = EmitARCAutorelease(value);
+ return Builder.CreateBitCast(value, origType);
+}
+
+/// Do a fused retain/autorelease of the given object.
+/// call i8* @objc_retainAutorelease(i8* %value)
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retainAutorelease,
+ "objc_retainAutorelease");
+}
+
+/// i8* @objc_loadWeak(i8** %addr)
+/// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
+llvm::Value *CodeGenFunction::EmitARCLoadWeak(llvm::Value *addr) {
+ return emitARCLoadOperation(*this, addr,
+ CGM.getARCEntrypoints().objc_loadWeak,
+ "objc_loadWeak");
+}
+
+/// i8* @objc_loadWeakRetained(i8** %addr)
+llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(llvm::Value *addr) {
+ return emitARCLoadOperation(*this, addr,
+ CGM.getARCEntrypoints().objc_loadWeakRetained,
+ "objc_loadWeakRetained");
+}
+
+/// i8* @objc_storeWeak(i8** %addr, i8* %value)
+/// Returns %value.
+llvm::Value *CodeGenFunction::EmitARCStoreWeak(llvm::Value *addr,
+ llvm::Value *value,
+ bool ignored) {
+ return emitARCStoreOperation(*this, addr, value,
+ CGM.getARCEntrypoints().objc_storeWeak,
+ "objc_storeWeak", ignored);
+}
+
+/// i8* @objc_initWeak(i8** %addr, i8* %value)
+/// Returns %value. %addr is known to not have a current weak entry.
+/// Essentially equivalent to:
+/// *addr = nil; objc_storeWeak(addr, value);
+void CodeGenFunction::EmitARCInitWeak(llvm::Value *addr, llvm::Value *value) {
+ // If we're initializing to null, just write null to memory; no need
+ // to get the runtime involved. But don't do this if optimization
+ // is enabled, because accounting for this would make the optimizer
+ // much more complicated.
+ if (isa<llvm::ConstantPointerNull>(value) &&
+ CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ Builder.CreateStore(value, addr);
+ return;
+ }
+
+ emitARCStoreOperation(*this, addr, value,
+ CGM.getARCEntrypoints().objc_initWeak,
+ "objc_initWeak", /*ignored*/ true);
+}
+
+/// void @objc_destroyWeak(i8** %addr)
+/// Essentially objc_storeWeak(addr, nil).
+void CodeGenFunction::EmitARCDestroyWeak(llvm::Value *addr) {
+ llvm::Constant *&fn = CGM.getARCEntrypoints().objc_destroyWeak;
+ if (!fn) {
+ std::vector<llvm::Type*> args(1, Int8PtrPtrTy);
+ const llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Builder.getVoidTy(), args, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_destroyWeak");
+ }
+
+ // Cast the argument to 'id*'.
+ addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
+
+ llvm::CallInst *call = Builder.CreateCall(fn, addr);
+ call->setDoesNotThrow();
+}
+
+/// void @objc_moveWeak(i8** %dest, i8** %src)
+/// Disregards the current value in %dest. Leaves %src pointing to nothing.
+/// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
+void CodeGenFunction::EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src) {
+ emitARCCopyOperation(*this, dst, src,
+ CGM.getARCEntrypoints().objc_moveWeak,
+ "objc_moveWeak");
+}
+
+/// void @objc_copyWeak(i8** %dest, i8** %src)
+/// Disregards the current value in %dest. Essentially
+/// objc_release(objc_initWeak(dest, objc_readWeakRetained(src)))
+void CodeGenFunction::EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src) {
+ emitARCCopyOperation(*this, dst, src,
+ CGM.getARCEntrypoints().objc_copyWeak,
+ "objc_copyWeak");
+}
+
+/// Produce the code to do a objc_autoreleasepool_push.
+/// call i8* @objc_autoreleasePoolPush(void)
+llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
+ llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPush;
+ if (!fn) {
+ const llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Int8PtrTy, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPush");
+ }
+
+ llvm::CallInst *call = Builder.CreateCall(fn);
+ call->setDoesNotThrow();
+
+ return call;
+}
+
+/// Produce the code to do a primitive release.
+/// call void @objc_autoreleasePoolPop(i8* %ptr)
+void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
+ assert(value->getType() == Int8PtrTy);
+
+ llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPop;
+ if (!fn) {
+ std::vector<llvm::Type*> args(1, Int8PtrTy);
+ const llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Builder.getVoidTy(), args, false);
+
+ // We don't want to use a weak import here; instead we should not
+ // fall into this path.
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPop");
+ }
+
+ llvm::CallInst *call = Builder.CreateCall(fn, value);
+ call->setDoesNotThrow();
+}
+
+/// Produce the code to do an MRR version of objc_autoreleasepool_push.
+/// Which is: [[NSAutoreleasePool alloc] init];
+/// Where alloc is declared as: + (id) alloc; in NSAutoreleasePool class.
+/// init is declared as: - (id) init; in its NSObject super class.
+///
+llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(Builder);
+ // [NSAutoreleasePool alloc]
+ IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
+ Selector AllocSel = getContext().Selectors.getSelector(0, &II);
+ CallArgList Args;
+ RValue AllocRV =
+ Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().getObjCIdType(),
+ AllocSel, Receiver, Args);
+
+ // [Receiver init]
+ Receiver = AllocRV.getScalarVal();
+ II = &CGM.getContext().Idents.get("init");
+ Selector InitSel = getContext().Selectors.getSelector(0, &II);
+ RValue InitRV =
+ Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().getObjCIdType(),
+ InitSel, Receiver, Args);
+ return InitRV.getScalarVal();
+}
+
+/// Produce the code to do a primitive release.
+/// [tmp drain];
+void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
+ IdentifierInfo *II = &CGM.getContext().Idents.get("drain");
+ Selector DrainSel = getContext().Selectors.getSelector(0, &II);
+ CallArgList Args;
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().VoidTy, DrainSel, Arg, Args);
+}
+
+void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ QualType type) {
+ llvm::Value *ptr = CGF.Builder.CreateLoad(addr, "strongdestroy");
+ CGF.EmitARCRelease(ptr, /*precise*/ true);
+}
+
+void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ QualType type) {
+ llvm::Value *ptr = CGF.Builder.CreateLoad(addr, "strongdestroy");
+ CGF.EmitARCRelease(ptr, /*precise*/ false);
+}
+
+void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ QualType type) {
+ CGF.EmitARCDestroyWeak(addr);
+}
+
+namespace {
+ struct CallObjCAutoreleasePoolObject : EHScopeStack::Cleanup {
+ llvm::Value *Token;
+
+ CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitObjCAutoreleasePoolPop(Token);
+ }
+ };
+ struct CallObjCMRRAutoreleasePoolObject : EHScopeStack::Cleanup {
+ llvm::Value *Token;
+
+ CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitObjCMRRAutoreleasePoolPop(Token);
+ }
+ };
+}
+
+void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
+ if (CGM.getLangOptions().ObjCAutoRefCount)
+ EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr);
+ else
+ EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr);
+}
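[Note: under ARC, an @autoreleasepool statement therefore lowers to a push, the body, and a pop run by this cleanup on scope exit; a rough sketch:]

    %token = call i8* @objc_autoreleasePoolPush()
    ; ... body of the @autoreleasepool block ...
    call void @objc_autoreleasePoolPop(i8* %token)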
+
+static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ LValue lvalue,
+ QualType type) {
+ switch (type.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Autoreleasing:
+ return TryEmitResult(CGF.EmitLoadOfLValue(lvalue).getScalarVal(),
+ false);
+
+ case Qualifiers::OCL_Weak:
+ return TryEmitResult(CGF.EmitARCLoadWeakRetained(lvalue.getAddress()),
+ true);
+ }
+
+ llvm_unreachable("impossible lifetime!");
+ return TryEmitResult();
+}
+
+static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ const Expr *e) {
+ e = e->IgnoreParens();
+ QualType type = e->getType();
+
+ // As a very special optimization, in ARC++, if the l-value is the
+ // result of a non-volatile assignment, do a simple retain of the
+ // result of the call to objc_storeWeak instead of reloading.
+ if (CGF.getLangOptions().CPlusPlus &&
+ !type.isVolatileQualified() &&
+ type.getObjCLifetime() == Qualifiers::OCL_Weak &&
+ isa<BinaryOperator>(e) &&
+ cast<BinaryOperator>(e)->getOpcode() == BO_Assign)
+ return TryEmitResult(CGF.EmitScalarExpr(e), false);
+
+ return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type);
+}
+
+static llvm::Value *emitARCRetainAfterCall(CodeGenFunction &CGF,
+ llvm::Value *value);
+
+/// Given that the given expression is some sort of call (which does
+/// not return retained), emit a retain following it.
+static llvm::Value *emitARCRetainCall(CodeGenFunction &CGF, const Expr *e) {
+ llvm::Value *value = CGF.EmitScalarExpr(e);
+ return emitARCRetainAfterCall(CGF, value);
+}
+
+static llvm::Value *emitARCRetainAfterCall(CodeGenFunction &CGF,
+ llvm::Value *value) {
+ if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
+ CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
+
+ // Place the retain immediately following the call.
+ CGF.Builder.SetInsertPoint(call->getParent(),
+ ++llvm::BasicBlock::iterator(call));
+ value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
+
+ CGF.Builder.restoreIP(ip);
+ return value;
+ } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
+ CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
+
+ // Place the retain at the beginning of the normal destination block.
+ llvm::BasicBlock *BB = invoke->getNormalDest();
+ CGF.Builder.SetInsertPoint(BB, BB->begin());
+ value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
+
+ CGF.Builder.restoreIP(ip);
+ return value;
+
+ // Bitcasts can arise because of related-result returns. Rewrite
+ // the operand.
+ } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
+ llvm::Value *operand = bitcast->getOperand(0);
+ operand = emitARCRetainAfterCall(CGF, operand);
+ bitcast->setOperand(0, operand);
+ return bitcast;
+
+ // Generic fall-back case.
+ } else {
+ // Retain using the non-block variant: we never need to do a copy
+ // of a block that's been returned to us.
+ return CGF.EmitARCRetainNonBlock(value);
+ }
+}
+
+static TryEmitResult
+tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
+ // The desired result type, if it differs from the type of the
+ // ultimate opaque expression.
+ const llvm::Type *resultType = 0;
+
+ // If we're loading retained from a __strong xvalue, we can avoid
+ // an extra retain/release pair by zeroing out the source of this
+ // "move" operation.
+ if (e->isXValue() && !e->getType().isConstQualified() &&
+ e->getType().getObjCLifetime() == Qualifiers::OCL_Strong) {
+ // Emit the lvalue
+ LValue lv = CGF.EmitLValue(e);
+
+ // Load the object pointer and cast it to the appropriate type.
+ QualType exprType = e->getType();
+ llvm::Value *result = CGF.EmitLoadOfLValue(lv).getScalarVal();
+
+ if (resultType)
+ result = CGF.Builder.CreateBitCast(result, resultType);
+
+ // Set the source pointer to NULL.
+ llvm::Value *null
+ = llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(CGF.ConvertType(exprType)));
+ CGF.EmitStoreOfScalar(null, lv);
+
+ return TryEmitResult(result, true);
+ }
+
+ while (true) {
+ e = e->IgnoreParens();
+
+ // There's a break at the end of this if-chain; anything
+ // that wants to keep looping has to explicitly continue.
+ if (const CastExpr *ce = dyn_cast<CastExpr>(e)) {
+ switch (ce->getCastKind()) {
+ // No-op casts don't change the type, so we just ignore them.
+ case CK_NoOp:
+ e = ce->getSubExpr();
+ continue;
+
+ case CK_LValueToRValue: {
+ TryEmitResult loadResult
+ = tryEmitARCRetainLoadOfScalar(CGF, ce->getSubExpr());
+ if (resultType) {
+ llvm::Value *value = loadResult.getPointer();
+ value = CGF.Builder.CreateBitCast(value, resultType);
+ loadResult.setPointer(value);
+ }
+ return loadResult;
+ }
+
+ // These casts can change the type, so remember that and
+ // soldier on. We only need to remember the outermost such
+ // cast, though.
+ case CK_AnyPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_BitCast:
+ if (!resultType)
+ resultType = CGF.ConvertType(ce->getType());
+ e = ce->getSubExpr();
+ assert(e->getType()->hasPointerRepresentation());
+ continue;
+
+ // For consumptions, just emit the subexpression and thus elide
+ // the retain/release pair.
+ case CK_ObjCConsumeObject: {
+ llvm::Value *result = CGF.EmitScalarExpr(ce->getSubExpr());
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ // For reclaims, emit the subexpression as a retained call and
+ // skip the consumption.
+ case CK_ObjCReclaimReturnedObject: {
+ llvm::Value *result = emitARCRetainCall(CGF, ce->getSubExpr());
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ case CK_GetObjCProperty: {
+ llvm::Value *result = emitARCRetainCall(CGF, ce);
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ default:
+ break;
+ }
+
+ // Skip __extension__.
+ } else if (const UnaryOperator *op = dyn_cast<UnaryOperator>(e)) {
+ if (op->getOpcode() == UO_Extension) {
+ e = op->getSubExpr();
+ continue;
+ }
+
+ // For calls and message sends, use the retained-call logic.
+ // Delegate inits are a special case in that they're the only
+ // returns-retained expression that *isn't* surrounded by
+ // a consume.
+ } else if (isa<CallExpr>(e) ||
+ (isa<ObjCMessageExpr>(e) &&
+ !cast<ObjCMessageExpr>(e)->isDelegateInitCall())) {
+ llvm::Value *result = emitARCRetainCall(CGF, e);
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ // Conservatively halt the search at any other expression kind.
+ break;
+ }
+
+ // We didn't find an obvious production, so emit what we've got and
+ // tell the caller that we didn't manage to retain.
+ llvm::Value *result = CGF.EmitScalarExpr(e);
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, false);
+}
+
+static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ LValue lvalue,
+ QualType type) {
+ TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type);
+ llvm::Value *value = result.getPointer();
+ if (!result.getInt())
+ value = CGF.EmitARCRetain(type, value);
+ return value;
+}
+
+/// EmitARCRetainScalarExpr - Semantically equivalent to
+/// EmitARCRetainObject(e->getType(), EmitScalarExpr(e)), but making a
+/// best-effort attempt to peephole expressions that naturally produce
+/// retained objects.
+llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
+ TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
+ llvm::Value *value = result.getPointer();
+ if (!result.getInt())
+ value = EmitARCRetain(e->getType(), value);
+ return value;
+}
+
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
+ TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
+ llvm::Value *value = result.getPointer();
+ if (result.getInt())
+ value = EmitARCAutorelease(value);
+ else
+ value = EmitARCRetainAutorelease(e->getType(), value);
+ return value;
+}
+
+std::pair<LValue,llvm::Value*>
+CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
+ bool ignored) {
+ // Evaluate the RHS first.
+ TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS());
+ llvm::Value *value = result.getPointer();
+
+ LValue lvalue = EmitLValue(e->getLHS());
+
+ // If the RHS was emitted retained, expand this.
+ if (result.getInt()) {
+ llvm::Value *oldValue =
+ EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatileQualified(),
+ lvalue.getAlignment(), e->getType(),
+ lvalue.getTBAAInfo());
+ EmitStoreOfScalar(value, lvalue.getAddress(),
+ lvalue.isVolatileQualified(), lvalue.getAlignment(),
+ e->getType(), lvalue.getTBAAInfo());
+ EmitARCRelease(oldValue, /*precise*/ false);
+ } else {
+ value = EmitARCStoreStrong(lvalue, value, ignored);
+ }
+
+ return std::pair<LValue,llvm::Value*>(lvalue, value);
+}
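
When the RHS was emitted retained, the code above expands the strong store by hand instead of calling into the runtime: load the old value, store the new one, then release the old one. The same shape in C (names are illustrative):

typedef struct objc_object *id;
extern void objc_release(id value);

void store_strong_expanded(id *slot, id newValue /* already +1 */) {
  id oldValue = *slot;      /* read the previous strong reference */
  *slot = newValue;         /* the retained RHS takes its place */
  objc_release(oldValue);   /* balance the reference that was displaced */
}
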
+
+std::pair<LValue,llvm::Value*>
+CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
+ llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
+ LValue lvalue = EmitLValue(e->getLHS());
+
+ EmitStoreOfScalar(value, lvalue.getAddress(),
+ lvalue.isVolatileQualified(), lvalue.getAlignment(),
+ e->getType(), lvalue.getTBAAInfo());
+
+ return std::pair<LValue,llvm::Value*>(lvalue, value);
+}
+
+void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
+ const ObjCAutoreleasePoolStmt &ARPS) {
+ const Stmt *subStmt = ARPS.getSubStmt();
+ const CompoundStmt &S = cast<CompoundStmt>(*subStmt);
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI) {
+ DI->setLocation(S.getLBracLoc());
+ DI->EmitRegionStart(Builder);
+ }
+
+ // Keep track of the current cleanup stack depth.
+ RunCleanupsScope Scope(*this);
+ if (CGM.getCodeGenOpts().ObjCRuntimeHasARC) {
+ llvm::Value *token = EmitObjCAutoreleasePoolPush();
+ EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
+ } else {
+ llvm::Value *token = EmitObjCMRRAutoreleasePoolPush();
+ EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token);
+ }
+
+ for (CompoundStmt::const_body_iterator I = S.body_begin(),
+ E = S.body_end(); I != E; ++I)
+ EmitStmt(*I);
+
+ if (DI) {
+ DI->setLocation(S.getRBracLoc());
+ DI->EmitRegionEnd(Builder);
+ }
+}
+
+/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
+/// make sure it survives garbage collection until this point.
+void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
+  // We just use inline assembly.
+ llvm::Type *paramTypes[] = { VoidPtrTy };
+ llvm::FunctionType *extenderType
+ = llvm::FunctionType::get(VoidTy, paramTypes, /*variadic*/ false);
+ llvm::Value *extender
+ = llvm::InlineAsm::get(extenderType,
+ /* assembly */ "",
+ /* constraints */ "r",
+ /* side effects */ true);
+
+ object = Builder.CreateBitCast(object, VoidPtrTy);
+ Builder.CreateCall(extender, object)->setDoesNotThrow();
+}
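
The empty asm with an "r" constraint and declared side effects is the classic keep-this-value-live idiom: it emits no instructions, but forces the object pointer to be materialized at this point. The source-level equivalent, using GNU extended asm (the wrapper name is illustrative):

static void extend_lifetime(void *object) {
  /* no code is generated, but `object` must be available in a register here,
     so the collector treats it as live up to this point */
  __asm__ __volatile__("" : : "r"(object));
}
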
+
CGObjCRuntime::~CGObjCRuntime() {}
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index f0993c5..61027fe 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -50,7 +50,7 @@ namespace {
/// avoids constructing the type more than once if it's used more than once.
class LazyRuntimeFunction {
CodeGenModule *CGM;
- std::vector<const llvm::Type*> ArgTys;
+ std::vector<llvm::Type*> ArgTys;
const char *FunctionName;
llvm::Constant *Function;
public:
@@ -63,14 +63,14 @@ class LazyRuntimeFunction {
/// of the arguments.
END_WITH_NULL
void init(CodeGenModule *Mod, const char *name,
- const llvm::Type *RetTy, ...) {
+ llvm::Type *RetTy, ...) {
CGM =Mod;
FunctionName = name;
Function = 0;
ArgTys.clear();
va_list Args;
va_start(Args, RetTy);
- while (const llvm::Type *ArgTy = va_arg(Args, const llvm::Type*))
+ while (llvm::Type *ArgTy = va_arg(Args, llvm::Type*))
ArgTys.push_back(ArgTy);
va_end(Args);
// Push the return type on at the end so we can pop it off easily
@@ -118,24 +118,24 @@ protected:
/// LLVM type for selectors. Opaque pointer (i8*) unless a header declaring
/// SEL is included in a header somewhere, in which case it will be whatever
/// type is declared in that header, most likely {i8*, i8*}.
- const llvm::PointerType *SelectorTy;
+ llvm::PointerType *SelectorTy;
/// LLVM i8 type. Cached here to avoid repeatedly getting it in all of the
/// places where it's used
const llvm::IntegerType *Int8Ty;
/// Pointer to i8 - LLVM type of char*, for all of the places where the
/// runtime needs to deal with C strings.
- const llvm::PointerType *PtrToInt8Ty;
+ llvm::PointerType *PtrToInt8Ty;
/// Instance Method Pointer type. This is a pointer to a function that takes,
/// at a minimum, an object and a selector, and is the generic type for
/// Objective-C methods. Due to differences between variadic / non-variadic
/// calling conventions, it must always be cast to the correct type before
/// actually being used.
- const llvm::PointerType *IMPTy;
+ llvm::PointerType *IMPTy;
/// Type of an untyped Objective-C object. Clang treats id as a built-in type
/// when compiling Objective-C code, so this may be an opaque pointer (i8*),
/// but if the runtime header declaring it is included then it may be a
/// pointer to a structure.
- const llvm::PointerType *IdTy;
+ llvm::PointerType *IdTy;
/// Pointer to a pointer to an Objective-C object. Used in the new ABI
/// message lookup function and some GC-related functions.
const llvm::PointerType *PtrToIdTy;
@@ -143,15 +143,15 @@ protected:
/// call Objective-C methods.
CanQualType ASTIdTy;
/// LLVM type for C int type.
- const llvm::IntegerType *IntTy;
+ llvm::IntegerType *IntTy;
/// LLVM type for an opaque pointer. This is identical to PtrToInt8Ty, but is
/// used in the code to document the difference between i8* meaning a pointer
/// to a C string and i8* meaning a pointer to some opaque type.
- const llvm::PointerType *PtrTy;
+ llvm::PointerType *PtrTy;
/// LLVM type for C long type. The runtime uses this in a lot of places where
/// it should be using intptr_t, but we can't fix this without breaking
/// compatibility with GCC...
- const llvm::IntegerType *LongTy;
+ llvm::IntegerType *LongTy;
/// LLVM type for C size_t. Used in various runtime data structures.
const llvm::IntegerType *SizeTy;
/// LLVM type for C ptrdiff_t. Mainly used in property accessor functions.
@@ -392,6 +392,9 @@ private:
/// Emits a reference to a class. This allows the linker to object if there
/// is no class of the matching name.
void EmitClassRef(const std::string &className);
+ /// Emits a pointer to the named class
+ llvm::Value *GetClassNamed(CGBuilderTy &Builder, const std::string &Name,
+ bool isWeak);
protected:
/// Looks up the method for sending a message to the specified object. This
/// mechanism differs between the GCC and GNU runtimes, so this method must be
@@ -484,6 +487,7 @@ public:
virtual llvm::Value *EmitIvarOffset(CodeGenFunction &CGF,
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
+ virtual llvm::Value *EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder);
virtual llvm::Constant *BuildGCBlockLayout(CodeGenModule &CGM,
const CGBlockInfo &blockInfo) {
return NULLPtr;
@@ -527,7 +531,7 @@ protected:
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper,
PtrToObjCSuperTy), cmd};
- return Builder.CreateCall(MsgLookupSuperFn, lookupArgs, lookupArgs+2);
+ return Builder.CreateCall(MsgLookupSuperFn, lookupArgs);
}
public:
CGObjCGCC(CodeGenModule &Mod) : CGObjCGNU(Mod, 8, 2) {
@@ -596,15 +600,14 @@ class CGObjCGNUstep : public CGObjCGNU {
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *lookupArgs[] = {ObjCSuper, cmd};
- llvm::CallInst *slot = Builder.CreateCall(SlotLookupSuperFn, lookupArgs,
- lookupArgs+2);
+ llvm::CallInst *slot = Builder.CreateCall(SlotLookupSuperFn, lookupArgs);
slot->setOnlyReadsMemory();
return Builder.CreateLoad(Builder.CreateStructGEP(slot, 4));
}
public:
CGObjCGNUstep(CodeGenModule &Mod) : CGObjCGNU(Mod, 9, 3) {
- llvm::StructType *SlotStructTy = llvm::StructType::get(VMContext, PtrTy,
+ llvm::StructType *SlotStructTy = llvm::StructType::get(PtrTy,
PtrTy, PtrTy, IntTy, IMPTy, NULL);
SlotTy = llvm::PointerType::getUnqual(SlotStructTy);
// Slot_t objc_msg_lookup_sender(id *receiver, SEL selector, id sender);
@@ -615,7 +618,7 @@ class CGObjCGNUstep : public CGObjCGNU {
PtrToObjCSuperTy, SelectorTy, NULL);
    // If we're in ObjC++ mode, then we also want the C++ exception handling entry points.
if (CGM.getLangOptions().CPlusPlus) {
- const llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
+ llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
// void *__cxa_begin_catch(void *e)
EnterCatchFn.init(&CGM, "__cxa_begin_catch", PtrTy, PtrTy, NULL);
// void __cxa_end_catch(void)
@@ -705,10 +708,10 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
}
PtrToIdTy = llvm::PointerType::getUnqual(IdTy);
- ObjCSuperTy = llvm::StructType::get(VMContext, IdTy, IdTy, NULL);
+ ObjCSuperTy = llvm::StructType::get(IdTy, IdTy, NULL);
PtrToObjCSuperTy = llvm::PointerType::getUnqual(ObjCSuperTy);
- const llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
+ llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
// void objc_exception_throw(id);
ExceptionThrowFn.init(&CGM, "objc_exception_throw", VoidTy, IdTy, NULL);
@@ -736,16 +739,19 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
PtrDiffTy, BoolTy, BoolTy, NULL);
// IMP type
- const llvm::Type *IMPArgs[] = { IdTy, SelectorTy };
+ llvm::Type *IMPArgs[] = { IdTy, SelectorTy };
IMPTy = llvm::PointerType::getUnqual(llvm::FunctionType::get(IdTy, IMPArgs,
true));
+ const LangOptions &Opts = CGM.getLangOptions();
+ if ((Opts.getGCMode() != LangOptions::NonGC) || Opts.ObjCAutoRefCount)
+ RuntimeVersion = 10;
+
// Don't bother initialising the GC stuff unless we're compiling in GC mode
- if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
+ if (Opts.getGCMode() != LangOptions::NonGC) {
    // This is a bit of a hack. We should sort this out by having a proper
// CGObjCGNUstep subclass for GC, but we may want to really support the old
// ABI and GC added in ObjectiveC2.framework, so we fudge it a bit for now
- RuntimeVersion = 10;
// Get selectors needed in GC mode
RetainSel = GetNullarySelector("retain", CGM.getContext());
ReleaseSel = GetNullarySelector("release", CGM.getContext());
@@ -772,24 +778,38 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
}
}
-// This has to perform the lookup every time, since posing and related
-// techniques can modify the name -> class mapping.
-llvm::Value *CGObjCGNU::GetClass(CGBuilderTy &Builder,
- const ObjCInterfaceDecl *OID) {
- llvm::Value *ClassName = CGM.GetAddrOfConstantCString(OID->getNameAsString());
+llvm::Value *CGObjCGNU::GetClassNamed(CGBuilderTy &Builder,
+ const std::string &Name,
+ bool isWeak) {
+ llvm::Value *ClassName = CGM.GetAddrOfConstantCString(Name);
// With the incompatible ABI, this will need to be replaced with a direct
// reference to the class symbol. For the compatible nonfragile ABI we are
// still performing this lookup at run time but emitting the symbol for the
// class externally so that we can make the switch later.
- EmitClassRef(OID->getNameAsString());
+ //
+ // Libobjc2 contains an LLVM pass that replaces calls to objc_lookup_class
+ // with memoized versions or with static references if it's safe to do so.
+ if (!isWeak)
+ EmitClassRef(Name);
ClassName = Builder.CreateStructGEP(ClassName, 0);
+ llvm::Type *ArgTys[] = { PtrToInt8Ty };
llvm::Constant *ClassLookupFn =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, PtrToInt8Ty, true),
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, ArgTys, true),
"objc_lookup_class");
return Builder.CreateCall(ClassLookupFn, ClassName);
}
+// This has to perform the lookup every time, since posing and related
+// techniques can modify the name -> class mapping.
+llvm::Value *CGObjCGNU::GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *OID) {
+ return GetClassNamed(Builder, OID->getNameAsString(), OID->isWeakImported());
+}
+llvm::Value *CGObjCGNU::EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder) {
+ return GetClassNamed(Builder, "NSAutoreleasePool", false);
+}
+
llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel,
const std::string &TypeEncoding, bool lval) {
@@ -908,7 +928,7 @@ llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
fields.push_back(Vtable);
fields.push_back(typeName);
llvm::Constant *TI =
- MakeGlobal(llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty,
+ MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
NULL), fields, "__objc_eh_typeinfo_" + className,
llvm::GlobalValue::LinkOnceODRLinkage);
return llvm::ConstantExpr::getBitCast(TI, PtrToInt8Ty);
@@ -929,7 +949,7 @@ llvm::Constant *CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
Ivars.push_back(MakeConstantString(Str));
Ivars.push_back(llvm::ConstantInt::get(IntTy, Str.size()));
llvm::Constant *ObjCStr = MakeGlobal(
- llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty, IntTy, NULL),
+ llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, IntTy, NULL),
Ivars, ".objc_str");
ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty);
ObjCStrings[Str] = ObjCStr;
@@ -969,7 +989,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
ActualArgs.add(RValue::get(EnforceType(Builder, Receiver, IdTy)), ASTIdTy);
ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType());
- ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+ ActualArgs.addFrom(CallArgs);
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
@@ -979,11 +999,13 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
if (isCategoryImpl) {
llvm::Constant *classLookupFunction = 0;
if (IsClassMessage) {
+ llvm::Type *ArgTys[] = { PtrTy };
classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- IdTy, PtrTy, true), "objc_get_meta_class");
+ IdTy, ArgTys, true), "objc_get_meta_class");
} else {
+ llvm::Type *ArgTys[] = { PtrTy };
classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- IdTy, PtrTy, true), "objc_get_class");
+ IdTy, ArgTys, true), "objc_get_class");
}
ReceiverClass = Builder.CreateCall(classLookupFunction,
MakeConstantString(Class->getNameAsString()));
@@ -1012,13 +1034,13 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
// Cast the pointer to a simplified version of the class structure
ReceiverClass = Builder.CreateBitCast(ReceiverClass,
llvm::PointerType::getUnqual(
- llvm::StructType::get(VMContext, IdTy, IdTy, NULL)));
+ llvm::StructType::get(IdTy, IdTy, NULL)));
// Get the superclass pointer
ReceiverClass = Builder.CreateStructGEP(ReceiverClass, 1);
// Load the superclass pointer
ReceiverClass = Builder.CreateLoad(ReceiverClass);
// Construct the structure used to look up the IMP
- llvm::StructType *ObjCSuperTy = llvm::StructType::get(VMContext,
+ llvm::StructType *ObjCSuperTy = llvm::StructType::get(
Receiver->getType(), IdTy, NULL);
llvm::Value *ObjCSuper = Builder.CreateAlloca(ObjCSuperTy);
@@ -1121,7 +1143,7 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
CallArgList ActualArgs;
ActualArgs.add(RValue::get(Receiver), ASTIdTy);
ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType());
- ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+ ActualArgs.addFrom(CallArgs);
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
@@ -1187,7 +1209,7 @@ llvm::Constant *CGObjCGNU::GenerateMethodList(const llvm::StringRef &ClassName,
if (MethodSels.empty())
return NULLPtr;
// Get the method structure type.
- llvm::StructType *ObjCMethodTy = llvm::StructType::get(VMContext,
+ llvm::StructType *ObjCMethodTy = llvm::StructType::get(
      PtrToInt8Ty, // Really a selector, but the runtime creates it for us.
PtrToInt8Ty, // Method types
IMPTy, //Method pointer
@@ -1217,18 +1239,14 @@ llvm::Constant *CGObjCGNU::GenerateMethodList(const llvm::StringRef &ClassName,
Methods);
// Structure containing list pointer, array and array count
- llvm::SmallVector<const llvm::Type*, 16> ObjCMethodListFields;
- llvm::PATypeHolder OpaqueNextTy = llvm::OpaqueType::get(VMContext);
- llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(OpaqueNextTy);
- llvm::StructType *ObjCMethodListTy = llvm::StructType::get(VMContext,
+ llvm::StructType *ObjCMethodListTy =
+ llvm::StructType::createNamed(VMContext, "");
+ llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(ObjCMethodListTy);
+ ObjCMethodListTy->setBody(
NextPtrTy,
IntTy,
ObjCMethodArrayTy,
NULL);
- // Refine next pointer type to concrete type
- llvm::cast<llvm::OpaqueType>(
- OpaqueNextTy.get())->refineAbstractTypeTo(ObjCMethodListTy);
- ObjCMethodListTy = llvm::cast<llvm::StructType>(OpaqueNextTy.get());
Methods.clear();
Methods.push_back(llvm::ConstantPointerNull::get(
@@ -1249,7 +1267,7 @@ llvm::Constant *CGObjCGNU::GenerateIvarList(
if (IvarNames.size() == 0)
return NULLPtr;
// Get the method structure type.
- llvm::StructType *ObjCIvarTy = llvm::StructType::get(VMContext,
+ llvm::StructType *ObjCIvarTy = llvm::StructType::get(
PtrToInt8Ty,
PtrToInt8Ty,
IntTy,
@@ -1273,7 +1291,7 @@ llvm::Constant *CGObjCGNU::GenerateIvarList(
Elements.push_back(llvm::ConstantInt::get(IntTy, (int)IvarNames.size()));
Elements.push_back(llvm::ConstantArray::get(ObjCIvarArrayTy, Ivars));
// Structure containing array and array count
- llvm::StructType *ObjCIvarListTy = llvm::StructType::get(VMContext, IntTy,
+ llvm::StructType *ObjCIvarListTy = llvm::StructType::get(IntTy,
ObjCIvarArrayTy,
NULL);
@@ -1302,7 +1320,7 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
// Fields marked New ABI are part of the GNUstep runtime. We emit them
// anyway; the classes will still work with the GNU runtime, they will just
// be ignored.
- llvm::StructType *ClassTy = llvm::StructType::get(VMContext,
+ llvm::StructType *ClassTy = llvm::StructType::get(
PtrToInt8Ty, // class_pointer
PtrToInt8Ty, // super_class
PtrToInt8Ty, // name
@@ -1359,7 +1377,7 @@ llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
const llvm::SmallVectorImpl<llvm::Constant *> &MethodNames,
const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes) {
// Get the method structure type.
- llvm::StructType *ObjCMethodDescTy = llvm::StructType::get(VMContext,
+ llvm::StructType *ObjCMethodDescTy = llvm::StructType::get(
PtrToInt8Ty, // Really a selector, but the runtime does the casting for us.
PtrToInt8Ty,
NULL);
@@ -1375,7 +1393,7 @@ llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
MethodNames.size());
llvm::Constant *Array = llvm::ConstantArray::get(ObjCMethodArrayTy,
Methods);
- llvm::StructType *ObjCMethodDescListTy = llvm::StructType::get(VMContext,
+ llvm::StructType *ObjCMethodDescListTy = llvm::StructType::get(
IntTy, ObjCMethodArrayTy, NULL);
Methods.clear();
Methods.push_back(llvm::ConstantInt::get(IntTy, MethodNames.size()));
@@ -1388,7 +1406,7 @@ llvm::Constant *CGObjCGNU::GenerateProtocolList(
const llvm::SmallVectorImpl<std::string> &Protocols) {
llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
Protocols.size());
- llvm::StructType *ProtocolListTy = llvm::StructType::get(VMContext,
+ llvm::StructType *ProtocolListTy = llvm::StructType::get(
    PtrTy, // Should be a recursive pointer, but it's always NULL here.
SizeTy,
ProtocolArrayTy,
@@ -1435,7 +1453,7 @@ llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
GenerateProtocolMethodList(EmptyConstantVector, EmptyConstantVector);
// Protocols are objects containing lists of the methods implemented and
// protocols adopted.
- llvm::StructType *ProtocolTy = llvm::StructType::get(VMContext, IdTy,
+ llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
PtrToInt8Ty,
ProtocolList->getType(),
MethodList->getType(),
@@ -1521,7 +1539,7 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
// The isSynthesized value is always set to 0 in a protocol. It exists to
// simplify the runtime library by allowing it to use the same data
// structures for protocol metadata everywhere.
- llvm::StructType *PropertyMetadataTy = llvm::StructType::get(VMContext,
+ llvm::StructType *PropertyMetadataTy = llvm::StructType::get(
PtrToInt8Ty, Int8Ty, Int8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty,
PtrToInt8Ty, NULL);
std::vector<llvm::Constant*> Properties;
@@ -1573,7 +1591,7 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
{llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
llvm::Constant *PropertyListInit =
- llvm::ConstantStruct::get(VMContext, PropertyListInitFields, 3, false);
+ llvm::ConstantStruct::getAnon(PropertyListInitFields);
llvm::Constant *PropertyList = new llvm::GlobalVariable(TheModule,
PropertyListInit->getType(), false, llvm::GlobalValue::InternalLinkage,
PropertyListInit, ".objc_property_list");
@@ -1586,7 +1604,7 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
OptionalPropertyArray };
llvm::Constant *OptionalPropertyListInit =
- llvm::ConstantStruct::get(VMContext, OptionalPropertyListInitFields, 3, false);
+ llvm::ConstantStruct::getAnon(OptionalPropertyListInitFields);
llvm::Constant *OptionalPropertyList = new llvm::GlobalVariable(TheModule,
OptionalPropertyListInit->getType(), false,
llvm::GlobalValue::InternalLinkage, OptionalPropertyListInit,
@@ -1594,7 +1612,7 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
// Protocols are objects containing lists of the methods implemented and
// protocols adopted.
- llvm::StructType *ProtocolTy = llvm::StructType::get(VMContext, IdTy,
+ llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
PtrToInt8Ty,
ProtocolList->getType(),
InstanceMethodList->getType(),
@@ -1641,7 +1659,7 @@ void CGObjCGNU::GenerateProtocolHolderCategory(void) {
// Protocol list
llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrTy,
ExistingProtocols.size());
- llvm::StructType *ProtocolListTy = llvm::StructType::get(VMContext,
+ llvm::StructType *ProtocolListTy = llvm::StructType::get(
    PtrTy, // Should be a recursive pointer, but it's always NULL here.
SizeTy,
ProtocolArrayTy,
@@ -1664,7 +1682,7 @@ void CGObjCGNU::GenerateProtocolHolderCategory(void) {
Elements.push_back(llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolListTy,
ProtocolElements, ".objc_protocol_list"), PtrTy));
Categories.push_back(llvm::ConstantExpr::getBitCast(
- MakeGlobal(llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty,
+ MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
PtrTy, PtrTy, PtrTy, NULL), Elements), PtrTy));
}
@@ -1718,7 +1736,7 @@ void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
Elements.push_back(llvm::ConstantExpr::getBitCast(
GenerateProtocolList(Protocols), PtrTy));
Categories.push_back(llvm::ConstantExpr::getBitCast(
- MakeGlobal(llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty,
+ MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
PtrTy, PtrTy, PtrTy, NULL), Elements), PtrTy));
}
@@ -1729,7 +1747,7 @@ llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OI
//
// Property metadata: name, attributes, isSynthesized, setter name, setter
// types, getter name, getter types.
- llvm::StructType *PropertyMetadataTy = llvm::StructType::get(VMContext,
+ llvm::StructType *PropertyMetadataTy = llvm::StructType::get(
PtrToInt8Ty, Int8Ty, Int8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty,
PtrToInt8Ty, NULL);
std::vector<llvm::Constant*> Properties;
@@ -1788,7 +1806,7 @@ llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OI
{llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
llvm::Constant *PropertyListInit =
- llvm::ConstantStruct::get(VMContext, PropertyListInitFields, 3, false);
+ llvm::ConstantStruct::getAnon(PropertyListInitFields);
return new llvm::GlobalVariable(TheModule, PropertyListInit->getType(), false,
llvm::GlobalValue::InternalLinkage, PropertyListInit,
".objc_property_list");
@@ -1859,13 +1877,25 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
Offset = BaseOffset - superInstanceSize;
}
- IvarOffsets.push_back(
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Offset));
- IvarOffsetValues.push_back(new llvm::GlobalVariable(TheModule, IntTy,
+ llvm::Constant *OffsetValue = llvm::ConstantInt::get(IntTy, Offset);
+ // Create the direct offset value
+ std::string OffsetName = "__objc_ivar_offset_value_" + ClassName +"." +
+ IVD->getNameAsString();
+ llvm::GlobalVariable *OffsetVar = TheModule.getGlobalVariable(OffsetName);
+ if (OffsetVar) {
+ OffsetVar->setInitializer(OffsetValue);
+ // If this is the real definition, change its linkage type so that
+ // different modules will use this one, rather than their private
+ // copy.
+ OffsetVar->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ } else
+ OffsetVar = new llvm::GlobalVariable(TheModule, IntTy,
false, llvm::GlobalValue::ExternalLinkage,
- llvm::ConstantInt::get(IntTy, Offset),
+ OffsetValue,
"__objc_ivar_offset_value_" + ClassName +"." +
- IVD->getNameAsString()));
+ IVD->getNameAsString());
+ IvarOffsets.push_back(OffsetValue);
+ IvarOffsetValues.push_back(OffsetVar);
}
llvm::GlobalVariable *IvarOffsetArray =
MakeGlobalArray(PtrToIntTy, IvarOffsetValues, ".ivar.offsets");
@@ -2005,18 +2035,12 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
const llvm::StructType *SelStructTy = dyn_cast<llvm::StructType>(
SelectorTy->getElementType());
- const llvm::Type *SelStructPtrTy = SelectorTy;
+ llvm::Type *SelStructPtrTy = SelectorTy;
if (SelStructTy == 0) {
- SelStructTy = llvm::StructType::get(VMContext, PtrToInt8Ty,
- PtrToInt8Ty, NULL);
+ SelStructTy = llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, NULL);
SelStructPtrTy = llvm::PointerType::getUnqual(SelStructTy);
}
- // Name the ObjC types to make the IR a bit easier to read
- TheModule.addTypeName(".objc_selector", SelStructPtrTy);
- TheModule.addTypeName(".objc_id", IdTy);
- TheModule.addTypeName(".objc_imp", IMPTy);
-
std::vector<llvm::Constant*> Elements;
llvm::Constant *Statics = NULLPtr;
// Generate statics list:
@@ -2034,7 +2058,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
Elements.push_back(llvm::ConstantArray::get(StaticsArrayTy,
ConstantStrings));
llvm::StructType *StaticsListTy =
- llvm::StructType::get(VMContext, PtrToInt8Ty, StaticsArrayTy, NULL);
+ llvm::StructType::get(PtrToInt8Ty, StaticsArrayTy, NULL);
llvm::Type *StaticsListPtrTy =
llvm::PointerType::getUnqual(StaticsListTy);
Statics = MakeGlobal(StaticsListTy, Elements, ".objc_statics");
@@ -2049,8 +2073,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// Array of classes, categories, and constant objects
llvm::ArrayType *ClassListTy = llvm::ArrayType::get(PtrToInt8Ty,
Classes.size() + Categories.size() + 2);
- llvm::StructType *SymTabTy = llvm::StructType::get(VMContext,
- LongTy, SelStructPtrTy,
+ llvm::StructType *SymTabTy = llvm::StructType::get(LongTy, SelStructPtrTy,
llvm::Type::getInt16Ty(VMContext),
llvm::Type::getInt16Ty(VMContext),
ClassListTy, NULL);
@@ -2132,10 +2155,9 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// The symbol table is contained in a module which has some version-checking
// constants
- llvm::StructType * ModuleTy = llvm::StructType::get(VMContext, LongTy, LongTy,
+ llvm::StructType * ModuleTy = llvm::StructType::get(LongTy, LongTy,
PtrToInt8Ty, llvm::PointerType::getUnqual(SymTabTy),
- (CGM.getLangOptions().getGCMode() == LangOptions::NonGC) ? NULL : IntTy,
- NULL);
+ (RuntimeVersion >= 10) ? IntTy : NULL, NULL);
Elements.clear();
// Runtime version, used for ABI compatibility checking.
Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion));
@@ -2154,14 +2176,21 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
Elements.push_back(MakeConstantString(path, ".objc_source_file_name"));
Elements.push_back(SymTab);
- switch (CGM.getLangOptions().getGCMode()) {
- case LangOptions::GCOnly:
+ if (RuntimeVersion >= 10)
+ switch (CGM.getLangOptions().getGCMode()) {
+ case LangOptions::GCOnly:
Elements.push_back(llvm::ConstantInt::get(IntTy, 2));
- case LangOptions::NonGC:
break;
- case LangOptions::HybridGC:
- Elements.push_back(llvm::ConstantInt::get(IntTy, 1));
- }
+ case LangOptions::NonGC:
+ if (CGM.getLangOptions().ObjCAutoRefCount)
+ Elements.push_back(llvm::ConstantInt::get(IntTy, 1));
+ else
+ Elements.push_back(llvm::ConstantInt::get(IntTy, 0));
+ break;
+ case LangOptions::HybridGC:
+ Elements.push_back(llvm::ConstantInt::get(IntTy, 1));
+ break;
+ }
llvm::Value *Module = MakeGlobal(ModuleTy, Elements);
@@ -2176,9 +2205,9 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
CGBuilderTy Builder(VMContext);
Builder.SetInsertPoint(EntryBB);
+ llvm::Type *ArgTys[] = { llvm::PointerType::getUnqual(ModuleTy) };
llvm::FunctionType *FT =
- llvm::FunctionType::get(Builder.getVoidTy(),
- llvm::PointerType::getUnqual(ModuleTy), true);
+ llvm::FunctionType::get(Builder.getVoidTy(), ArgTys, true);
llvm::Value *Register = CGM.CreateRuntimeFunction(FT, "__objc_exec_class");
Builder.CreateCall(Register, Module);
Builder.CreateRetVoid();
@@ -2280,8 +2309,8 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
CGF.Builder.CreateCall(ExceptionThrowFn, ExceptionAsObject);
CGF.Builder.CreateUnreachable();
} else {
- CGF.Builder.CreateInvoke(ExceptionThrowFn, UnwindBB, UnwindBB, &ExceptionAsObject,
- &ExceptionAsObject+1);
+ CGF.Builder.CreateInvoke(ExceptionThrowFn, UnwindBB, UnwindBB,
+ ExceptionAsObject);
}
// Clear the insertion point to indicate we are in unreachable code.
CGF.Builder.ClearInsertionPoint();
@@ -2422,10 +2451,19 @@ llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
const ObjCIvarDecl *Ivar) {
if (CGM.getLangOptions().ObjCNonFragileABI) {
Interface = FindIvarInterface(CGM.getContext(), Interface, Ivar);
- return CGF.Builder.CreateZExtOrBitCast(
- CGF.Builder.CreateLoad(CGF.Builder.CreateLoad(
- ObjCIvarOffsetVariable(Interface, Ivar), false, "ivar")),
- PtrDiffTy);
+ if (RuntimeVersion < 10)
+ return CGF.Builder.CreateZExtOrBitCast(
+ CGF.Builder.CreateLoad(CGF.Builder.CreateLoad(
+ ObjCIvarOffsetVariable(Interface, Ivar), false, "ivar")),
+ PtrDiffTy);
+ std::string name = "__objc_ivar_offset_value_" +
+ Interface->getNameAsString() +"." + Ivar->getNameAsString();
+ llvm::Value *Offset = TheModule.getGlobalVariable(name);
+ if (!Offset)
+ Offset = new llvm::GlobalVariable(TheModule, IntTy,
+ false, llvm::GlobalValue::CommonLinkage,
+ 0, name);
+ return CGF.Builder.CreateLoad(Offset);
}
uint64_t Offset = ComputeIvarBaseOffset(CGF.CGM, Interface, Ivar);
return llvm::ConstantInt::get(PtrDiffTy, Offset, "ivar");
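
With runtime version 10 and later, the path above reads the ivar offset from a per-ivar global rather than through the indirect offset pointer. A sketch of the resulting access pattern, assuming the offset global is filled in when the class is loaded (the real symbol embeds a '.', __objc_ivar_offset_value_<Class>.<ivar>; the C names here are illustrative):

extern int __objc_ivar_offset_value_MyClass_ivar;   /* resolved/updated at class load */

static char *ivar_address(char *object) {
  return object + __objc_ivar_offset_value_MyClass_ivar;
}
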
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 8c3e9a3..010b9e1 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -63,10 +63,13 @@ private:
/// The default messenger, used for sends whose ABI is unchanged from
/// the all-integer/pointer case.
llvm::Constant *getMessageSendFn() const {
- const llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ // Add the non-lazy-bind attribute, since objc_msgSend is likely to
+ // be called a lot.
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, true),
- "objc_msgSend");
+ "objc_msgSend",
+ llvm::Attribute::NonLazyBind);
}
/// void objc_msgSend_stret (id, SEL, ...)
@@ -75,7 +78,7 @@ private:
/// by indirect reference in the first argument, and therefore the
/// self and selector parameters are shifted over by one.
llvm::Constant *getMessageSendStretFn() const {
- const llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.VoidTy,
params, true),
"objc_msgSend_stret");
@@ -88,7 +91,7 @@ private:
/// floating-point stack; without a special entrypoint, the nil case
/// would be unbalanced.
llvm::Constant *getMessageSendFpretFn() const {
- const llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(
llvm::Type::getDoubleTy(VMContext),
params, true),
@@ -102,7 +105,7 @@ private:
/// semantics. The class passed is the superclass of the current
/// class.
llvm::Constant *getMessageSendSuperFn() const {
- const llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy };
+ llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, true),
"objc_msgSendSuper");
@@ -113,7 +116,7 @@ private:
/// A slightly different messenger used for super calls. The class
/// passed is the current class.
llvm::Constant *getMessageSendSuperFn2() const {
- const llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy };
+ llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, true),
"objc_msgSendSuper2");
@@ -124,7 +127,7 @@ private:
///
/// The messenger used for super calls which return an aggregate indirectly.
llvm::Constant *getMessageSendSuperStretFn() const {
- const llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy };
+ llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.VoidTy, params, true),
"objc_msgSendSuper_stret");
@@ -135,7 +138,7 @@ private:
///
/// objc_msgSendSuper_stret with the super2 semantics.
llvm::Constant *getMessageSendSuperStretFn2() const {
- const llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy };
+ llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.VoidTy, params, true),
"objc_msgSendSuper2_stret");
@@ -155,20 +158,20 @@ protected:
CodeGen::CodeGenModule &CGM;
public:
- const llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy;
- const llvm::Type *Int8PtrTy;
+ llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy;
+ llvm::Type *Int8PtrTy;
/// ObjectPtrTy - LLVM type for object handles (typeof(id))
- const llvm::Type *ObjectPtrTy;
+ llvm::Type *ObjectPtrTy;
/// PtrObjectPtrTy - LLVM type for id *
- const llvm::Type *PtrObjectPtrTy;
+ llvm::Type *PtrObjectPtrTy;
/// SelectorPtrTy - LLVM type for selector handles (typeof(SEL))
- const llvm::Type *SelectorPtrTy;
+ llvm::Type *SelectorPtrTy;
/// ProtocolPtrTy - LLVM type for external protocol handles
/// (typeof(Protocol))
- const llvm::Type *ExternalProtocolPtrTy;
+ llvm::Type *ExternalProtocolPtrTy;
// SuperCTy - clang type for struct objc_super.
QualType SuperCTy;
@@ -176,28 +179,28 @@ public:
QualType SuperPtrCTy;
/// SuperTy - LLVM type for struct objc_super.
- const llvm::StructType *SuperTy;
+ llvm::StructType *SuperTy;
/// SuperPtrTy - LLVM type for struct objc_super *.
- const llvm::Type *SuperPtrTy;
+ llvm::Type *SuperPtrTy;
/// PropertyTy - LLVM type for struct objc_property (struct _prop_t
/// in GCC parlance).
- const llvm::StructType *PropertyTy;
+ llvm::StructType *PropertyTy;
/// PropertyListTy - LLVM type for struct objc_property_list
/// (_prop_list_t in GCC parlance).
- const llvm::StructType *PropertyListTy;
+ llvm::StructType *PropertyListTy;
/// PropertyListPtrTy - LLVM type for struct objc_property_list*.
- const llvm::Type *PropertyListPtrTy;
+ llvm::Type *PropertyListPtrTy;
// MethodTy - LLVM type for struct objc_method.
- const llvm::StructType *MethodTy;
+ llvm::StructType *MethodTy;
/// CacheTy - LLVM type for struct objc_cache.
- const llvm::Type *CacheTy;
+ llvm::Type *CacheTy;
/// CachePtrTy - LLVM type for struct objc_cache *.
- const llvm::Type *CachePtrTy;
-
+ llvm::Type *CachePtrTy;
+
llvm::Constant *getGetPropertyFn() {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
@@ -270,7 +273,7 @@ public:
/// GcReadWeakFn -- LLVM objc_read_weak (id *src) function.
llvm::Constant *getGcReadWeakFn() {
// id objc_read_weak (id *)
- const llvm::Type *args[] = { ObjectPtrTy->getPointerTo() };
+ llvm::Type *args[] = { ObjectPtrTy->getPointerTo() };
llvm::FunctionType *FTy =
llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_read_weak");
@@ -279,7 +282,7 @@ public:
/// GcAssignWeakFn -- LLVM objc_assign_weak function.
llvm::Constant *getGcAssignWeakFn() {
// id objc_assign_weak (id, id *)
- const llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
llvm::FunctionType *FTy =
llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");
@@ -288,7 +291,7 @@ public:
/// GcAssignGlobalFn -- LLVM objc_assign_global function.
llvm::Constant *getGcAssignGlobalFn() {
// id objc_assign_global(id, id *)
- const llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
llvm::FunctionType *FTy =
llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
@@ -297,7 +300,7 @@ public:
/// GcAssignThreadLocalFn -- LLVM objc_assign_threadlocal function.
llvm::Constant *getGcAssignThreadLocalFn() {
// id objc_assign_threadlocal(id src, id * dest)
- const llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
llvm::FunctionType *FTy =
llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_threadlocal");
@@ -306,8 +309,8 @@ public:
/// GcAssignIvarFn -- LLVM objc_assign_ivar function.
llvm::Constant *getGcAssignIvarFn() {
// id objc_assign_ivar(id, id *, ptrdiff_t)
- const llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo(),
- CGM.PtrDiffTy };
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo(),
+ CGM.PtrDiffTy };
llvm::FunctionType *FTy =
llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
@@ -316,7 +319,7 @@ public:
/// GcMemmoveCollectableFn -- LLVM objc_memmove_collectable function.
llvm::Constant *GcMemmoveCollectableFn() {
// void *objc_memmove_collectable(void *dst, const void *src, size_t size)
- const llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, LongTy };
+ llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, LongTy };
llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_memmove_collectable");
}
@@ -324,7 +327,7 @@ public:
/// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function.
llvm::Constant *getGcAssignStrongCastFn() {
// id objc_assign_strongCast(id, id *)
- const llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
llvm::FunctionType *FTy =
llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast");
@@ -333,7 +336,7 @@ public:
/// ExceptionThrowFn - LLVM objc_exception_throw function.
llvm::Constant *getExceptionThrowFn() {
// void objc_exception_throw(id)
- const llvm::Type *args[] = { ObjectPtrTy };
+ llvm::Type *args[] = { ObjectPtrTy };
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
@@ -349,7 +352,7 @@ public:
/// SyncEnterFn - LLVM object_sync_enter function.
llvm::Constant *getSyncEnterFn() {
// void objc_sync_enter (id)
- const llvm::Type *args[] = { ObjectPtrTy };
+ llvm::Type *args[] = { ObjectPtrTy };
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
@@ -358,7 +361,7 @@ public:
/// SyncExitFn - LLVM object_sync_exit function.
llvm::Constant *getSyncExitFn() {
// void objc_sync_exit (id)
- const llvm::Type *args[] = { ObjectPtrTy };
+ llvm::Type *args[] = { ObjectPtrTy };
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
@@ -397,62 +400,62 @@ public:
class ObjCTypesHelper : public ObjCCommonTypesHelper {
public:
/// SymtabTy - LLVM type for struct objc_symtab.
- const llvm::StructType *SymtabTy;
+ llvm::StructType *SymtabTy;
/// SymtabPtrTy - LLVM type for struct objc_symtab *.
- const llvm::Type *SymtabPtrTy;
+ llvm::Type *SymtabPtrTy;
/// ModuleTy - LLVM type for struct objc_module.
- const llvm::StructType *ModuleTy;
+ llvm::StructType *ModuleTy;
/// ProtocolTy - LLVM type for struct objc_protocol.
- const llvm::StructType *ProtocolTy;
+ llvm::StructType *ProtocolTy;
/// ProtocolPtrTy - LLVM type for struct objc_protocol *.
- const llvm::Type *ProtocolPtrTy;
+ llvm::Type *ProtocolPtrTy;
/// ProtocolExtensionTy - LLVM type for struct
/// objc_protocol_extension.
- const llvm::StructType *ProtocolExtensionTy;
+ llvm::StructType *ProtocolExtensionTy;
/// ProtocolExtensionTy - LLVM type for struct
/// objc_protocol_extension *.
- const llvm::Type *ProtocolExtensionPtrTy;
+ llvm::Type *ProtocolExtensionPtrTy;
/// MethodDescriptionTy - LLVM type for struct
/// objc_method_description.
- const llvm::StructType *MethodDescriptionTy;
+ llvm::StructType *MethodDescriptionTy;
/// MethodDescriptionListTy - LLVM type for struct
/// objc_method_description_list.
- const llvm::StructType *MethodDescriptionListTy;
+ llvm::StructType *MethodDescriptionListTy;
/// MethodDescriptionListPtrTy - LLVM type for struct
/// objc_method_description_list *.
- const llvm::Type *MethodDescriptionListPtrTy;
+ llvm::Type *MethodDescriptionListPtrTy;
  /// ProtocolListTy - LLVM type for struct objc_protocol_list.
- const llvm::Type *ProtocolListTy;
+ llvm::StructType *ProtocolListTy;
  /// ProtocolListPtrTy - LLVM type for struct objc_protocol_list*.
- const llvm::Type *ProtocolListPtrTy;
+ llvm::Type *ProtocolListPtrTy;
/// CategoryTy - LLVM type for struct objc_category.
- const llvm::StructType *CategoryTy;
+ llvm::StructType *CategoryTy;
/// ClassTy - LLVM type for struct objc_class.
- const llvm::StructType *ClassTy;
+ llvm::StructType *ClassTy;
/// ClassPtrTy - LLVM type for struct objc_class *.
- const llvm::Type *ClassPtrTy;
+ llvm::Type *ClassPtrTy;
/// ClassExtensionTy - LLVM type for struct objc_class_ext.
- const llvm::StructType *ClassExtensionTy;
+ llvm::StructType *ClassExtensionTy;
/// ClassExtensionPtrTy - LLVM type for struct objc_class_ext *.
- const llvm::Type *ClassExtensionPtrTy;
+ llvm::Type *ClassExtensionPtrTy;
// IvarTy - LLVM type for struct objc_ivar.
- const llvm::StructType *IvarTy;
+ llvm::StructType *IvarTy;
/// IvarListTy - LLVM type for struct objc_ivar_list.
- const llvm::Type *IvarListTy;
+ llvm::Type *IvarListTy;
/// IvarListPtrTy - LLVM type for struct objc_ivar_list *.
- const llvm::Type *IvarListPtrTy;
+ llvm::Type *IvarListPtrTy;
/// MethodListTy - LLVM type for struct objc_method_list.
- const llvm::Type *MethodListTy;
+ llvm::Type *MethodListTy;
/// MethodListPtrTy - LLVM type for struct objc_method_list *.
- const llvm::Type *MethodListPtrTy;
+ llvm::Type *MethodListPtrTy;
/// ExceptionDataTy - LLVM type for struct _objc_exception_data.
- const llvm::Type *ExceptionDataTy;
-
+ llvm::Type *ExceptionDataTy;
+
/// ExceptionTryEnterFn - LLVM objc_exception_try_enter function.
llvm::Constant *getExceptionTryEnterFn() {
- const llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
+ llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.VoidTy, params, false),
"objc_exception_try_enter");
@@ -460,7 +463,7 @@ public:
/// ExceptionTryExitFn - LLVM objc_exception_try_exit function.
llvm::Constant *getExceptionTryExitFn() {
- const llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
+ llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.VoidTy, params, false),
"objc_exception_try_exit");
@@ -468,7 +471,7 @@ public:
/// ExceptionExtractFn - LLVM objc_exception_extract function.
llvm::Constant *getExceptionExtractFn() {
- const llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
+ llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, false),
"objc_exception_extract");
@@ -476,7 +479,7 @@ public:
/// ExceptionMatchFn - LLVM objc_exception_match function.
llvm::Constant *getExceptionMatchFn() {
- const llvm::Type *params[] = { ClassPtrTy, ObjectPtrTy };
+ llvm::Type *params[] = { ClassPtrTy, ObjectPtrTy };
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.Int32Ty, params, false),
"objc_exception_match");
@@ -486,7 +489,7 @@ public:
/// SetJmpFn - LLVM _setjmp function.
llvm::Constant *getSetJmpFn() {
// This is specifically the prototype for x86.
- const llvm::Type *params[] = { CGM.Int32Ty->getPointerTo() };
+ llvm::Type *params[] = { CGM.Int32Ty->getPointerTo() };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty,
params, false),
"_setjmp");
@@ -503,46 +506,46 @@ class ObjCNonFragileABITypesHelper : public ObjCCommonTypesHelper {
public:
// MethodListnfABITy - LLVM for struct _method_list_t
- const llvm::StructType *MethodListnfABITy;
+ llvm::StructType *MethodListnfABITy;
// MethodListnfABIPtrTy - LLVM for struct _method_list_t*
- const llvm::Type *MethodListnfABIPtrTy;
+ llvm::Type *MethodListnfABIPtrTy;
// ProtocolnfABITy = LLVM for struct _protocol_t
- const llvm::StructType *ProtocolnfABITy;
+ llvm::StructType *ProtocolnfABITy;
// ProtocolnfABIPtrTy = LLVM for struct _protocol_t*
- const llvm::Type *ProtocolnfABIPtrTy;
+ llvm::Type *ProtocolnfABIPtrTy;
// ProtocolListnfABITy - LLVM for struct _objc_protocol_list
- const llvm::StructType *ProtocolListnfABITy;
+ llvm::StructType *ProtocolListnfABITy;
// ProtocolListnfABIPtrTy - LLVM for struct _objc_protocol_list*
- const llvm::Type *ProtocolListnfABIPtrTy;
+ llvm::Type *ProtocolListnfABIPtrTy;
// ClassnfABITy - LLVM for struct _class_t
- const llvm::StructType *ClassnfABITy;
+ llvm::StructType *ClassnfABITy;
// ClassnfABIPtrTy - LLVM for struct _class_t*
- const llvm::Type *ClassnfABIPtrTy;
+ llvm::Type *ClassnfABIPtrTy;
// IvarnfABITy - LLVM for struct _ivar_t
- const llvm::StructType *IvarnfABITy;
+ llvm::StructType *IvarnfABITy;
// IvarListnfABITy - LLVM for struct _ivar_list_t
- const llvm::StructType *IvarListnfABITy;
+ llvm::StructType *IvarListnfABITy;
// IvarListnfABIPtrTy = LLVM for struct _ivar_list_t*
- const llvm::Type *IvarListnfABIPtrTy;
+ llvm::Type *IvarListnfABIPtrTy;
// ClassRonfABITy - LLVM for struct _class_ro_t
- const llvm::StructType *ClassRonfABITy;
+ llvm::StructType *ClassRonfABITy;
// ImpnfABITy - LLVM for id (*)(id, SEL, ...)
- const llvm::Type *ImpnfABITy;
+ llvm::Type *ImpnfABITy;
// CategorynfABITy - LLVM for struct _category_t
- const llvm::StructType *CategorynfABITy;
+ llvm::StructType *CategorynfABITy;
// New types for nonfragile abi messaging.
@@ -551,31 +554,31 @@ public:
// IMP messenger;
// SEL name;
// };
- const llvm::StructType *MessageRefTy;
+ llvm::StructType *MessageRefTy;
// MessageRefCTy - clang type for struct _message_ref_t
QualType MessageRefCTy;
// MessageRefPtrTy - LLVM for struct _message_ref_t*
- const llvm::Type *MessageRefPtrTy;
+ llvm::Type *MessageRefPtrTy;
// MessageRefCPtrTy - clang type for struct _message_ref_t*
QualType MessageRefCPtrTy;
// MessengerTy - Type of the messenger (shown as IMP above)
- const llvm::FunctionType *MessengerTy;
+ llvm::FunctionType *MessengerTy;
// SuperMessageRefTy - LLVM for:
// struct _super_message_ref_t {
// SUPER_IMP messenger;
// SEL name;
// };
- const llvm::StructType *SuperMessageRefTy;
+ llvm::StructType *SuperMessageRefTy;
// SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
- const llvm::Type *SuperMessageRefPtrTy;
+ llvm::Type *SuperMessageRefPtrTy;
llvm::Constant *getMessageSendFixupFn() {
// id objc_msgSend_fixup(id, struct message_ref_t*, ...)
- const llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
+ llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, true),
"objc_msgSend_fixup");
@@ -583,7 +586,7 @@ public:
llvm::Constant *getMessageSendFpretFixupFn() {
// id objc_msgSend_fpret_fixup(id, struct message_ref_t*, ...)
- const llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
+ llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, true),
"objc_msgSend_fpret_fixup");
@@ -591,7 +594,7 @@ public:
llvm::Constant *getMessageSendStretFixupFn() {
// id objc_msgSend_stret_fixup(id, struct message_ref_t*, ...)
- const llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
+ llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, true),
"objc_msgSend_stret_fixup");
@@ -600,7 +603,7 @@ public:
llvm::Constant *getMessageSendSuper2FixupFn() {
// id objc_msgSendSuper2_fixup (struct objc_super *,
// struct _super_message_ref_t*, ...)
- const llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy };
+ llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, true),
"objc_msgSendSuper2_fixup");
@@ -609,7 +612,7 @@ public:
llvm::Constant *getMessageSendSuper2StretFixupFn() {
// id objc_msgSendSuper2_stret_fixup(struct objc_super *,
// struct _super_message_ref_t*, ...)
- const llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy };
+ llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, true),
"objc_msgSendSuper2_stret_fixup");
@@ -622,15 +625,15 @@ public:
}
llvm::Constant *getObjCBeginCatchFn() {
- const llvm::Type *params[] = { Int8PtrTy };
+ llvm::Type *params[] = { Int8PtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(Int8PtrTy,
params, false),
"objc_begin_catch");
}
- const llvm::StructType *EHTypeTy;
- const llvm::Type *EHTypePtrTy;
-
+ llvm::StructType *EHTypeTy;
+ llvm::Type *EHTypePtrTy;
+
ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm);
~ObjCNonFragileABITypesHelper(){}
};
@@ -887,6 +890,11 @@ private:
llvm::Value *EmitClassRef(CGBuilderTy &Builder,
const ObjCInterfaceDecl *ID);
+ llvm::Value *EmitClassRefFromId(CGBuilderTy &Builder,
+ IdentifierInfo *II);
+
+ llvm::Value *EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder);
+
/// EmitSuperClassRef - Emits reference to class's main metadata class.
llvm::Value *EmitSuperClassRef(const ObjCInterfaceDecl *ID);
@@ -1158,6 +1166,11 @@ private:
/// for the given class reference.
llvm::Value *EmitClassRef(CGBuilderTy &Builder,
const ObjCInterfaceDecl *ID);
+
+ llvm::Value *EmitClassRefFromId(CGBuilderTy &Builder,
+ IdentifierInfo *II);
+
+ llvm::Value *EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder);
/// EmitSuperClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
/// for the given super class reference.
@@ -1402,6 +1415,19 @@ llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
}
llvm::Constant *CGObjCMac::GetEHType(QualType T) {
+ if (T->isObjCIdType() ||
+ T->isObjCQualifiedIdType()) {
+ return CGM.GetAddrOfRTTIDescriptor(
+ CGM.getContext().ObjCIdRedefinitionType, /*ForEH=*/true);
+ }
+ if (T->isObjCClassType() ||
+ T->isObjCQualifiedClassType()) {
+ return CGM.GetAddrOfRTTIDescriptor(
+ CGM.getContext().ObjCClassRedefinitionType, /*ForEH=*/true);
+ }
+ if (T->isObjCObjectPointerType())
+ return CGM.GetAddrOfRTTIDescriptor(T, /*ForEH=*/true);
+
llvm_unreachable("asking for catch type for ObjC type in fragile runtime");
return 0;
}
@@ -1526,7 +1552,7 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy, "tmp");
ActualArgs.add(RValue::get(Arg0), Arg0Ty);
ActualArgs.add(RValue::get(Sel), CGF.getContext().getObjCSelType());
- ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+ ActualArgs.addFrom(CallArgs);
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
@@ -1562,7 +1588,7 @@ static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) {
if (FQT.isObjCGCStrong())
return Qualifiers::Strong;
- if (FQT.isObjCGCWeak())
+ if (FQT.isObjCGCWeak() || FQT.getObjCLifetime() == Qualifiers::OCL_Weak)
return Qualifiers::Weak;
if (FQT->isObjCObjectPointerType() || FQT->isBlockPointerType())
@@ -1579,7 +1605,8 @@ llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
llvm::Constant *nullPtr =
llvm::Constant::getNullValue(llvm::Type::getInt8PtrTy(VMContext));
- if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC)
+ if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC &&
+ !CGM.getLangOptions().ObjCAutoRefCount)
return nullPtr;
bool hasUnion = false;
@@ -1866,7 +1893,7 @@ CGObjCMac::EmitProtocolList(llvm::Twine Name,
// This list is null terminated.
ProtocolRefs.push_back(llvm::Constant::getNullValue(ObjCTypes.ProtocolPtrTy));
- std::vector<llvm::Constant*> Values(3);
+ llvm::Constant *Values[3];
// This field is only used by the runtime.
Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
Values[1] = llvm::ConstantInt::get(ObjCTypes.LongTy,
@@ -1876,7 +1903,7 @@ CGObjCMac::EmitProtocolList(llvm::Twine Name,
ProtocolRefs.size()),
ProtocolRefs);
- llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
llvm::GlobalVariable *GV =
CreateMetadataVar(Name, Init, "__OBJC,__cat_cls_meth,regular,no_dead_strip",
4, false);
@@ -1950,13 +1977,13 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(llvm::Twine Name,
unsigned PropertySize =
CGM.getTargetData().getTypeAllocSize(ObjCTypes.PropertyTy);
- std::vector<llvm::Constant*> Values(3);
+ llvm::Constant *Values[3];
Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, PropertySize);
Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Properties.size());
llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.PropertyTy,
Properties.size());
Values[2] = llvm::ConstantArray::get(AT, Properties);
- llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
llvm::GlobalVariable *GV =
CreateMetadataVar(Name, Init,
@@ -1994,12 +2021,12 @@ llvm::Constant *CGObjCMac::EmitMethodDescList(llvm::Twine Name,
if (Methods.empty())
return llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
- std::vector<llvm::Constant*> Values(2);
+ llvm::Constant *Values[2];
Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodDescriptionTy,
Methods.size());
Values[1] = llvm::ConstantArray::get(AT, Methods);
- llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
return llvm::ConstantExpr::getBitCast(GV,
@@ -2044,7 +2071,7 @@ void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
ClassMethods.push_back(GetMethodConstant(*i));
}
- std::vector<llvm::Constant*> Values(7);
+ llvm::Constant *Values[7];
Values[0] = GetClassName(OCD->getIdentifier());
Values[1] = GetClassName(Interface->getIdentifier());
LazySymbols.insert(Interface->getIdentifier());
@@ -2129,7 +2156,7 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
Interface->all_referenced_protocol_begin(),
Interface->all_referenced_protocol_end());
unsigned Flags = eClassFlags_Factory;
- if (ID->getNumIvarInitializers())
+ if (ID->hasCXXStructors())
Flags |= eClassFlags_HasCXXStructors;
unsigned Size =
CGM.getContext().getASTObjCImplementationLayout(ID).getSize().getQuantity();
@@ -2166,7 +2193,7 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
}
}
- std::vector<llvm::Constant*> Values(12);
+ llvm::Constant *Values[12];
Values[ 0] = EmitMetaClass(ID, Protocols, ClassMethods);
if (ObjCInterfaceDecl *Super = Interface->getSuperClass()) {
// Record a reference to the super class.
@@ -2225,7 +2252,7 @@ llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
Flags |= eClassFlags_Hidden;
- std::vector<llvm::Constant*> Values(12);
+ llvm::Constant *Values[12];
// The isa for the metaclass is the root of the hierarchy.
const ObjCInterfaceDecl *Root = ID->getClassInterface();
while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
@@ -2339,7 +2366,7 @@ CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID) {
uint64_t Size =
CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassExtensionTy);
- std::vector<llvm::Constant*> Values(3);
+ llvm::Constant *Values[3];
Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
Values[1] = BuildIvarLayout(ID, false);
Values[2] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getName(),
@@ -2383,11 +2410,8 @@ llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
ObjCInterfaceDecl *OID =
const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
- llvm::SmallVector<ObjCIvarDecl*, 16> OIvars;
- CGM.getContext().ShallowCollectObjCIvars(OID, OIvars);
-
- for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
- ObjCIvarDecl *IVD = OIvars[i];
+ for (ObjCIvarDecl *IVD = OID->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar()) {
// Ignore unnamed bit-fields.
if (!IVD->getDeclName())
continue;
@@ -2402,12 +2426,12 @@ llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
if (Ivars.empty())
return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy);
- std::vector<llvm::Constant*> Values(2);
+ llvm::Constant *Values[2];
Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarTy,
Ivars.size());
Values[1] = llvm::ConstantArray::get(AT, Ivars);
- llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
llvm::GlobalVariable *GV;
if (ForClass)
@@ -2459,17 +2483,16 @@ llvm::Constant *CGObjCMac::EmitMethodList(llvm::Twine Name,
if (Methods.empty())
return llvm::Constant::getNullValue(ObjCTypes.MethodListPtrTy);
- std::vector<llvm::Constant*> Values(3);
+ llvm::Constant *Values[3];
Values[0] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
Methods.size());
Values[2] = llvm::ConstantArray::get(AT, Methods);
- llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
- return llvm::ConstantExpr::getBitCast(GV,
- ObjCTypes.MethodListPtrTy);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListPtrTy);
}
llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
@@ -2558,7 +2581,7 @@ namespace {
: S(*S), SyncArgSlot(SyncArgSlot), CallTryExitVar(CallTryExitVar),
ExceptionData(ExceptionData), ObjCTypes(*ObjCTypes) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
// Check whether we need to call objc_exception_try_exit.
// In optimized code, this branch will always be folded.
llvm::BasicBlock *FinallyCallExit =
@@ -2674,14 +2697,12 @@ FragileHazards::FragileHazards(CodeGenFunction &CGF) : CGF(CGF) {
void FragileHazards::emitWriteHazard() {
if (Locals.empty()) return;
- CGF.Builder.CreateCall(WriteHazard, Locals.begin(), Locals.end())
- ->setDoesNotThrow();
+ CGF.Builder.CreateCall(WriteHazard, Locals)->setDoesNotThrow();
}
void FragileHazards::emitReadHazard(CGBuilderTy &Builder) {
assert(!Locals.empty());
- Builder.CreateCall(ReadHazard, Locals.begin(), Locals.end())
- ->setDoesNotThrow();
+ Builder.CreateCall(ReadHazard, Locals)->setDoesNotThrow();
}
/// Emit read hazards in all the protected blocks, i.e. all the blocks
@@ -2745,7 +2766,7 @@ void FragileHazards::collectLocals() {
}
llvm::FunctionType *FragileHazards::GetAsmFnType() {
- llvm::SmallVector<const llvm::Type *, 16> tys(Locals.size());
+ llvm::SmallVector<llvm::Type *, 16> tys(Locals.size());
for (unsigned i = 0, e = Locals.size(); i != e; ++i)
tys[i] = Locals[i]->getType();
return llvm::FunctionType::get(CGF.VoidTy, tys, false);
@@ -3389,7 +3410,7 @@ void CGObjCCommonMac::EmitImageInfo() {
Section = "__DATA, __objc_imageinfo, regular, no_dead_strip";
llvm::GlobalVariable *GV =
CreateMetadataVar("\01L_OBJC_IMAGE_INFO",
- llvm::ConstantArray::get(AT, values, 2),
+ llvm::ConstantArray::get(AT, values),
Section,
0,
true);
@@ -3430,7 +3451,7 @@ llvm::Constant *CGObjCMac::EmitModuleSymbols() {
if (!NumClasses && !NumCategories)
return llvm::Constant::getNullValue(ObjCTypes.SymtabPtrTy);
- std::vector<llvm::Constant*> Values(5);
+ llvm::Constant *Values[5];
Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
Values[1] = llvm::Constant::getNullValue(ObjCTypes.SelectorPtrTy);
Values[2] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumClasses);
@@ -3452,7 +3473,7 @@ llvm::Constant *CGObjCMac::EmitModuleSymbols() {
NumClasses + NumCategories),
Symbols);
- llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
llvm::GlobalVariable *GV =
CreateMetadataVar("\01L_OBJC_SYMBOLS", Init,
@@ -3461,25 +3482,35 @@ llvm::Constant *CGObjCMac::EmitModuleSymbols() {
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.SymtabPtrTy);
}
-llvm::Value *CGObjCMac::EmitClassRef(CGBuilderTy &Builder,
- const ObjCInterfaceDecl *ID) {
- LazySymbols.insert(ID->getIdentifier());
-
- llvm::GlobalVariable *&Entry = ClassReferences[ID->getIdentifier()];
-
+llvm::Value *CGObjCMac::EmitClassRefFromId(CGBuilderTy &Builder,
+ IdentifierInfo *II) {
+ LazySymbols.insert(II);
+
+ llvm::GlobalVariable *&Entry = ClassReferences[II];
+
if (!Entry) {
llvm::Constant *Casted =
- llvm::ConstantExpr::getBitCast(GetClassName(ID->getIdentifier()),
- ObjCTypes.ClassPtrTy);
+ llvm::ConstantExpr::getBitCast(GetClassName(II),
+ ObjCTypes.ClassPtrTy);
Entry =
- CreateMetadataVar("\01L_OBJC_CLASS_REFERENCES_", Casted,
- "__OBJC,__cls_refs,literal_pointers,no_dead_strip",
- 4, true);
+ CreateMetadataVar("\01L_OBJC_CLASS_REFERENCES_", Casted,
+ "__OBJC,__cls_refs,literal_pointers,no_dead_strip",
+ 4, true);
}
-
+
return Builder.CreateLoad(Entry, "tmp");
}
+llvm::Value *CGObjCMac::EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ return EmitClassRefFromId(Builder, ID->getIdentifier());
+}
+
+llvm::Value *CGObjCMac::EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder) {
+ IdentifierInfo *II = &CGM.getContext().Idents.get("NSAutoreleasePool");
+ return EmitClassRefFromId(Builder, II);
+}
+
llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel,
bool lvalue) {
llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
@@ -3567,12 +3598,18 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
uint64_t MaxFieldOffset = 0;
uint64_t MaxSkippedFieldOffset = 0;
uint64_t LastBitfieldOrUnnamedOffset = 0;
+ uint64_t FirstFieldDelta = 0;
if (RecFields.empty())
return;
unsigned WordSizeInBits = CGM.getContext().Target.getPointerWidth(0);
unsigned ByteSizeInBits = CGM.getContext().Target.getCharWidth();
-
+ if (!RD && CGM.getLangOptions().ObjCAutoRefCount) {
+ FieldDecl *FirstField = RecFields[0];
+ FirstFieldDelta =
+ ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(FirstField));
+ }
+
for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
FieldDecl *Field = RecFields[i];
uint64_t FieldOffset;
@@ -3580,9 +3617,10 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
// Note that 'i' here is actually the field index inside RD of Field,
// although this dependency is hidden.
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
- FieldOffset = RL.getFieldOffset(i) / ByteSizeInBits;
+ FieldOffset = (RL.getFieldOffset(i) / ByteSizeInBits) - FirstFieldDelta;
} else
- FieldOffset = ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(Field));
+ FieldOffset =
+ ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(Field)) - FirstFieldDelta;
// Skip over unnamed or bitfields
if (!Field->getIdentifier() || Field->isBitField()) {
@@ -3861,16 +3899,25 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
bool hasUnion = false;
const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
- if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC)
+ if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC &&
+ !CGM.getLangOptions().ObjCAutoRefCount)
return llvm::Constant::getNullValue(PtrTy);
- llvm::SmallVector<ObjCIvarDecl*, 32> Ivars;
- const ObjCInterfaceDecl *OI = OMD->getClassInterface();
- CGM.getContext().DeepCollectObjCIvars(OI, true, Ivars);
-
+ ObjCInterfaceDecl *OI =
+ const_cast<ObjCInterfaceDecl*>(OMD->getClassInterface());
llvm::SmallVector<FieldDecl*, 32> RecFields;
- for (unsigned k = 0, e = Ivars.size(); k != e; ++k)
- RecFields.push_back(cast<FieldDecl>(Ivars[k]));
+ if (CGM.getLangOptions().ObjCAutoRefCount) {
+ for (ObjCIvarDecl *IVD = OI->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar())
+ RecFields.push_back(cast<FieldDecl>(IVD));
+ }
+ else {
+ llvm::SmallVector<ObjCIvarDecl*, 32> Ivars;
+ CGM.getContext().DeepCollectObjCIvars(OI, true, Ivars);
+
+ for (unsigned k = 0, e = Ivars.size(); k != e; ++k)
+ RecFields.push_back(cast<FieldDecl>(Ivars[k]));
+ }
if (RecFields.empty())
return llvm::Constant::getNullValue(PtrTy);
@@ -4113,21 +4160,19 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// char *name;
// char *attributes;
// }
- PropertyTy = llvm::StructType::get(VMContext, Int8PtrTy, Int8PtrTy, NULL);
- CGM.getModule().addTypeName("struct._prop_t",
- PropertyTy);
+ PropertyTy = llvm::StructType::createNamed("struct._prop_t",
+ Int8PtrTy, Int8PtrTy, NULL);
// struct _prop_list_t {
// uint32_t entsize; // sizeof(struct _prop_t)
// uint32_t count_of_properties;
// struct _prop_t prop_list[count_of_properties];
// }
- PropertyListTy = llvm::StructType::get(VMContext, IntTy,
- IntTy,
- llvm::ArrayType::get(PropertyTy, 0),
- NULL);
- CGM.getModule().addTypeName("struct._prop_list_t",
- PropertyListTy);
+ PropertyListTy =
+ llvm::StructType::createNamed("struct._prop_list_t",
+ IntTy, IntTy,
+ llvm::ArrayType::get(PropertyTy, 0),
+ NULL);
// struct _prop_list_t *
PropertyListPtrTy = llvm::PointerType::getUnqual(PropertyListTy);
@@ -4136,16 +4181,14 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// char *method_type;
// char *_imp;
// }
- MethodTy = llvm::StructType::get(VMContext, SelectorPtrTy,
- Int8PtrTy,
- Int8PtrTy,
- NULL);
- CGM.getModule().addTypeName("struct._objc_method", MethodTy);
+ MethodTy = llvm::StructType::createNamed("struct._objc_method",
+ SelectorPtrTy, Int8PtrTy, Int8PtrTy,
+ NULL);
// struct _objc_cache *
- CacheTy = llvm::OpaqueType::get(VMContext);
- CGM.getModule().addTypeName("struct._objc_cache", CacheTy);
+ CacheTy = llvm::StructType::createNamed(VMContext, "struct._objc_cache");
CachePtrTy = llvm::PointerType::getUnqual(CacheTy);
+
}
ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
@@ -4155,22 +4198,18 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// char *types;
// }
MethodDescriptionTy =
- llvm::StructType::get(VMContext, SelectorPtrTy,
- Int8PtrTy,
- NULL);
- CGM.getModule().addTypeName("struct._objc_method_description",
- MethodDescriptionTy);
+ llvm::StructType::createNamed("struct._objc_method_description",
+ SelectorPtrTy, Int8PtrTy, NULL);
// struct _objc_method_description_list {
// int count;
// struct _objc_method_description[1];
// }
MethodDescriptionListTy =
- llvm::StructType::get(VMContext, IntTy,
- llvm::ArrayType::get(MethodDescriptionTy, 0),
- NULL);
- CGM.getModule().addTypeName("struct._objc_method_description_list",
- MethodDescriptionListTy);
+ llvm::StructType::createNamed("struct._objc_method_description_list",
+ IntTy,
+ llvm::ArrayType::get(MethodDescriptionTy, 0),
+ NULL);
// struct _objc_method_description_list *
MethodDescriptionListPtrTy =
@@ -4185,29 +4224,27 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// struct _objc_property_list *instance_properties;
// }
ProtocolExtensionTy =
- llvm::StructType::get(VMContext, IntTy,
- MethodDescriptionListPtrTy,
- MethodDescriptionListPtrTy,
- PropertyListPtrTy,
- NULL);
- CGM.getModule().addTypeName("struct._objc_protocol_extension",
- ProtocolExtensionTy);
+ llvm::StructType::createNamed("struct._objc_protocol_extension",
+ IntTy,
+ MethodDescriptionListPtrTy,
+ MethodDescriptionListPtrTy,
+ PropertyListPtrTy,
+ NULL);
// struct _objc_protocol_extension *
ProtocolExtensionPtrTy = llvm::PointerType::getUnqual(ProtocolExtensionTy);
// Handle recursive construction of Protocol and ProtocolList types
- llvm::PATypeHolder ProtocolTyHolder = llvm::OpaqueType::get(VMContext);
- llvm::PATypeHolder ProtocolListTyHolder = llvm::OpaqueType::get(VMContext);
+ ProtocolTy =
+ llvm::StructType::createNamed(VMContext, "struct._objc_protocol");
- const llvm::Type *T =
- llvm::StructType::get(VMContext,
- llvm::PointerType::getUnqual(ProtocolListTyHolder),
+ ProtocolListTy =
+ llvm::StructType::createNamed(VMContext, "struct._objc_protocol_list");
+ ProtocolListTy->setBody(llvm::PointerType::getUnqual(ProtocolListTy),
LongTy,
- llvm::ArrayType::get(ProtocolTyHolder, 0),
+ llvm::ArrayType::get(ProtocolTy, 0),
NULL);
- cast<llvm::OpaqueType>(ProtocolListTyHolder.get())->refineAbstractTypeTo(T);
// struct _objc_protocol {
// struct _objc_protocol_extension *isa;
@@ -4216,22 +4253,15 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// struct _objc_method_description_list *instance_methods;
// struct _objc_method_description_list *class_methods;
// }
- T = llvm::StructType::get(VMContext, ProtocolExtensionPtrTy,
- Int8PtrTy,
- llvm::PointerType::getUnqual(ProtocolListTyHolder),
- MethodDescriptionListPtrTy,
- MethodDescriptionListPtrTy,
- NULL);
- cast<llvm::OpaqueType>(ProtocolTyHolder.get())->refineAbstractTypeTo(T);
-
- ProtocolListTy = cast<llvm::StructType>(ProtocolListTyHolder.get());
- CGM.getModule().addTypeName("struct._objc_protocol_list",
- ProtocolListTy);
+ ProtocolTy->setBody(ProtocolExtensionPtrTy, Int8PtrTy,
+ llvm::PointerType::getUnqual(ProtocolListTy),
+ MethodDescriptionListPtrTy,
+ MethodDescriptionListPtrTy,
+ NULL);
+
// struct _objc_protocol_list *
ProtocolListPtrTy = llvm::PointerType::getUnqual(ProtocolListTy);
- ProtocolTy = cast<llvm::StructType>(ProtocolTyHolder.get());
- CGM.getModule().addTypeName("struct._objc_protocol", ProtocolTy);
ProtocolPtrTy = llvm::PointerType::getUnqual(ProtocolTy);
// Class description structures
@@ -4241,32 +4271,26 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// char *ivar_type;
// int ivar_offset;
// }
- IvarTy = llvm::StructType::get(VMContext, Int8PtrTy,
- Int8PtrTy,
- IntTy,
- NULL);
- CGM.getModule().addTypeName("struct._objc_ivar", IvarTy);
+ IvarTy = llvm::StructType::createNamed("struct._objc_ivar",
+ Int8PtrTy, Int8PtrTy, IntTy, NULL);
// struct _objc_ivar_list *
- IvarListTy = llvm::OpaqueType::get(VMContext);
- CGM.getModule().addTypeName("struct._objc_ivar_list", IvarListTy);
+ IvarListTy =
+ llvm::StructType::createNamed(VMContext, "struct._objc_ivar_list");
IvarListPtrTy = llvm::PointerType::getUnqual(IvarListTy);
// struct _objc_method_list *
- MethodListTy = llvm::OpaqueType::get(VMContext);
- CGM.getModule().addTypeName("struct._objc_method_list", MethodListTy);
+ MethodListTy =
+ llvm::StructType::createNamed(VMContext, "struct._objc_method_list");
MethodListPtrTy = llvm::PointerType::getUnqual(MethodListTy);
// struct _objc_class_extension *
ClassExtensionTy =
- llvm::StructType::get(VMContext, IntTy,
- Int8PtrTy,
- PropertyListPtrTy,
- NULL);
- CGM.getModule().addTypeName("struct._objc_class_extension", ClassExtensionTy);
+ llvm::StructType::createNamed("struct._objc_class_extension",
+ IntTy, Int8PtrTy, PropertyListPtrTy, NULL);
ClassExtensionPtrTy = llvm::PointerType::getUnqual(ClassExtensionTy);
- llvm::PATypeHolder ClassTyHolder = llvm::OpaqueType::get(VMContext);
+ ClassTy = llvm::StructType::createNamed(VMContext, "struct._objc_class");
// struct _objc_class {
// Class isa;
@@ -4282,24 +4306,20 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// char *ivar_layout;
// struct _objc_class_ext *ext;
// };
- T = llvm::StructType::get(VMContext,
- llvm::PointerType::getUnqual(ClassTyHolder),
- llvm::PointerType::getUnqual(ClassTyHolder),
- Int8PtrTy,
- LongTy,
- LongTy,
- LongTy,
- IvarListPtrTy,
- MethodListPtrTy,
- CachePtrTy,
- ProtocolListPtrTy,
- Int8PtrTy,
- ClassExtensionPtrTy,
- NULL);
- cast<llvm::OpaqueType>(ClassTyHolder.get())->refineAbstractTypeTo(T);
-
- ClassTy = cast<llvm::StructType>(ClassTyHolder.get());
- CGM.getModule().addTypeName("struct._objc_class", ClassTy);
+ ClassTy->setBody(llvm::PointerType::getUnqual(ClassTy),
+ llvm::PointerType::getUnqual(ClassTy),
+ Int8PtrTy,
+ LongTy,
+ LongTy,
+ LongTy,
+ IvarListPtrTy,
+ MethodListPtrTy,
+ CachePtrTy,
+ ProtocolListPtrTy,
+ Int8PtrTy,
+ ClassExtensionPtrTy,
+ NULL);
+
ClassPtrTy = llvm::PointerType::getUnqual(ClassTy);
// struct _objc_category {
@@ -4310,15 +4330,11 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// uint32_t size; // sizeof(struct _objc_category)
// struct _objc_property_list *instance_properties;// category's @property
// }
- CategoryTy = llvm::StructType::get(VMContext, Int8PtrTy,
- Int8PtrTy,
- MethodListPtrTy,
- MethodListPtrTy,
- ProtocolListPtrTy,
- IntTy,
- PropertyListPtrTy,
- NULL);
- CGM.getModule().addTypeName("struct._objc_category", CategoryTy);
+ CategoryTy =
+ llvm::StructType::createNamed("struct._objc_category",
+ Int8PtrTy, Int8PtrTy, MethodListPtrTy,
+ MethodListPtrTy, ProtocolListPtrTy,
+ IntTy, PropertyListPtrTy, NULL);
// Global metadata structures
@@ -4329,13 +4345,10 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// short cat_def_cnt;
// char *defs[cls_def_cnt + cat_def_cnt];
// }
- SymtabTy = llvm::StructType::get(VMContext, LongTy,
- SelectorPtrTy,
- ShortTy,
- ShortTy,
- llvm::ArrayType::get(Int8PtrTy, 0),
- NULL);
- CGM.getModule().addTypeName("struct._objc_symtab", SymtabTy);
+ SymtabTy =
+ llvm::StructType::createNamed("struct._objc_symtab",
+ LongTy, SelectorPtrTy, ShortTy, ShortTy,
+ llvm::ArrayType::get(Int8PtrTy, 0), NULL);
SymtabPtrTy = llvm::PointerType::getUnqual(SymtabTy);
// struct _objc_module {
@@ -4345,12 +4358,8 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// struct _objc_symtab* symtab;
// }
ModuleTy =
- llvm::StructType::get(VMContext, LongTy,
- LongTy,
- Int8PtrTy,
- SymtabPtrTy,
- NULL);
- CGM.getModule().addTypeName("struct._objc_module", ModuleTy);
+ llvm::StructType::createNamed("struct._objc_module",
+ LongTy, LongTy, Int8PtrTy, SymtabPtrTy, NULL);
// FIXME: This is the size of the setjmp buffer and should be target
@@ -4358,16 +4367,14 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
uint64_t SetJmpBufferSize = 18;
// Exceptions
- const llvm::Type *StackPtrTy = llvm::ArrayType::get(
+ llvm::Type *StackPtrTy = llvm::ArrayType::get(
llvm::Type::getInt8PtrTy(VMContext), 4);
ExceptionDataTy =
- llvm::StructType::get(VMContext,
- llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext),
- SetJmpBufferSize),
- StackPtrTy, NULL);
- CGM.getModule().addTypeName("struct._objc_exception_data",
- ExceptionDataTy);
+ llvm::StructType::createNamed("struct._objc_exception_data",
+ llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext),
+ SetJmpBufferSize),
+ StackPtrTy, NULL);
}
@@ -4378,12 +4385,11 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// uint32_t method_count;
// struct _objc_method method_list[method_count];
// }
- MethodListnfABITy = llvm::StructType::get(VMContext, IntTy,
- IntTy,
- llvm::ArrayType::get(MethodTy, 0),
- NULL);
- CGM.getModule().addTypeName("struct.__method_list_t",
- MethodListnfABITy);
+ MethodListnfABITy =
+ llvm::StructType::createNamed("struct.__method_list_t",
+ IntTy, IntTy,
+ llvm::ArrayType::get(MethodTy, 0),
+ NULL);
// struct method_list_t *
MethodListnfABIPtrTy = llvm::PointerType::getUnqual(MethodListnfABITy);
@@ -4401,22 +4407,21 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// }
// Holder for struct _protocol_list_t *
- llvm::PATypeHolder ProtocolListTyHolder = llvm::OpaqueType::get(VMContext);
-
- ProtocolnfABITy = llvm::StructType::get(VMContext, ObjectPtrTy,
- Int8PtrTy,
- llvm::PointerType::getUnqual(
- ProtocolListTyHolder),
- MethodListnfABIPtrTy,
- MethodListnfABIPtrTy,
- MethodListnfABIPtrTy,
- MethodListnfABIPtrTy,
- PropertyListPtrTy,
- IntTy,
- IntTy,
- NULL);
- CGM.getModule().addTypeName("struct._protocol_t",
- ProtocolnfABITy);
+ ProtocolListnfABITy =
+ llvm::StructType::createNamed(VMContext, "struct._objc_protocol_list");
+
+ ProtocolnfABITy =
+ llvm::StructType::createNamed("struct._protocol_t",
+ ObjectPtrTy, Int8PtrTy,
+ llvm::PointerType::getUnqual(ProtocolListnfABITy),
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ PropertyListPtrTy,
+ IntTy,
+ IntTy,
+ NULL);
// struct _protocol_t*
ProtocolnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolnfABITy);
@@ -4425,14 +4430,9 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// long protocol_count; // Note, this is 32/64 bit
// struct _protocol_t *[protocol_count];
// }
- ProtocolListnfABITy = llvm::StructType::get(VMContext, LongTy,
- llvm::ArrayType::get(
- ProtocolnfABIPtrTy, 0),
- NULL);
- CGM.getModule().addTypeName("struct._objc_protocol_list",
- ProtocolListnfABITy);
- cast<llvm::OpaqueType>(ProtocolListTyHolder.get())->refineAbstractTypeTo(
- ProtocolListnfABITy);
+ ProtocolListnfABITy->setBody(LongTy,
+ llvm::ArrayType::get(ProtocolnfABIPtrTy, 0),
+ NULL);
// struct _objc_protocol_list*
ProtocolListnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolListnfABITy);
@@ -4444,26 +4444,25 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// uint32_t alignment;
// uint32_t size;
// }
- IvarnfABITy = llvm::StructType::get(VMContext,
- llvm::PointerType::getUnqual(LongTy),
- Int8PtrTy,
- Int8PtrTy,
- IntTy,
- IntTy,
- NULL);
- CGM.getModule().addTypeName("struct._ivar_t", IvarnfABITy);
+ IvarnfABITy =
+ llvm::StructType::createNamed("struct._ivar_t",
+ llvm::PointerType::getUnqual(LongTy),
+ Int8PtrTy,
+ Int8PtrTy,
+ IntTy,
+ IntTy,
+ NULL);
// struct _ivar_list_t {
// uint32 entsize; // sizeof(struct _ivar_t)
// uint32 count;
// struct _ivar_t list[count];
// }
- IvarListnfABITy = llvm::StructType::get(VMContext, IntTy,
- IntTy,
- llvm::ArrayType::get(
- IvarnfABITy, 0),
- NULL);
- CGM.getModule().addTypeName("struct._ivar_list_t", IvarListnfABITy);
+ IvarListnfABITy =
+ llvm::StructType::createNamed("struct._ivar_list_t",
+ IntTy, IntTy,
+ llvm::ArrayType::get(IvarnfABITy, 0),
+ NULL);
IvarListnfABIPtrTy = llvm::PointerType::getUnqual(IvarListnfABITy);
@@ -4482,22 +4481,21 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// }
// FIXME. Add 'reserved' field in 64bit abi mode!
- ClassRonfABITy = llvm::StructType::get(VMContext, IntTy,
- IntTy,
- IntTy,
- Int8PtrTy,
- Int8PtrTy,
- MethodListnfABIPtrTy,
- ProtocolListnfABIPtrTy,
- IvarListnfABIPtrTy,
- Int8PtrTy,
- PropertyListPtrTy,
- NULL);
- CGM.getModule().addTypeName("struct._class_ro_t",
- ClassRonfABITy);
+ ClassRonfABITy = llvm::StructType::createNamed("struct._class_ro_t",
+ IntTy,
+ IntTy,
+ IntTy,
+ Int8PtrTy,
+ Int8PtrTy,
+ MethodListnfABIPtrTy,
+ ProtocolListnfABIPtrTy,
+ IvarListnfABIPtrTy,
+ Int8PtrTy,
+ PropertyListPtrTy,
+ NULL);
// ImpnfABITy - LLVM for id (*)(id, SEL, ...)
- const llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
ImpnfABITy = llvm::FunctionType::get(ObjectPtrTy, params, false)
->getPointerTo();
@@ -4509,19 +4507,13 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// struct class_ro_t *ro;
// }
- llvm::PATypeHolder ClassTyHolder = llvm::OpaqueType::get(VMContext);
- ClassnfABITy =
- llvm::StructType::get(VMContext,
- llvm::PointerType::getUnqual(ClassTyHolder),
- llvm::PointerType::getUnqual(ClassTyHolder),
- CachePtrTy,
- llvm::PointerType::getUnqual(ImpnfABITy),
- llvm::PointerType::getUnqual(ClassRonfABITy),
- NULL);
- CGM.getModule().addTypeName("struct._class_t", ClassnfABITy);
-
- cast<llvm::OpaqueType>(ClassTyHolder.get())->refineAbstractTypeTo(
- ClassnfABITy);
+ ClassnfABITy = llvm::StructType::createNamed(VMContext, "struct._class_t");
+ ClassnfABITy->setBody(llvm::PointerType::getUnqual(ClassnfABITy),
+ llvm::PointerType::getUnqual(ClassnfABITy),
+ CachePtrTy,
+ llvm::PointerType::getUnqual(ImpnfABITy),
+ llvm::PointerType::getUnqual(ClassRonfABITy),
+ NULL);
// LLVM for struct _class_t *
ClassnfABIPtrTy = llvm::PointerType::getUnqual(ClassnfABITy);
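[Illustration, not part of the patch.] The constructors above now build self-referential struct types by naming them first and filling in the body afterwards, replacing the removed llvm::OpaqueType / PATypeHolder / refineAbstractTypeTo dance. A minimal sketch of the same pattern, using a hypothetical node type and the LLVM API of this revision:

#include "llvm/LLVMContext.h"
#include "llvm/DerivedTypes.h"

// Create the named struct up front; the body set later may refer back to
// the (still opaque) type itself, e.g. through a pointer member.
static llvm::StructType *makeNodeType(llvm::LLVMContext &Ctx) {
  llvm::StructType *Node = llvm::StructType::createNamed(Ctx, "struct.node");
  Node->setBody(llvm::Type::getInt32Ty(Ctx),
                llvm::PointerType::getUnqual(Node),
                NULL);
  return Node;
}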
@@ -4534,14 +4526,14 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// const struct _protocol_list_t * const protocols;
// const struct _prop_list_t * const properties;
// }
- CategorynfABITy = llvm::StructType::get(VMContext, Int8PtrTy,
- ClassnfABIPtrTy,
- MethodListnfABIPtrTy,
- MethodListnfABIPtrTy,
- ProtocolListnfABIPtrTy,
- PropertyListPtrTy,
- NULL);
- CGM.getModule().addTypeName("struct._category_t", CategorynfABITy);
+ CategorynfABITy = llvm::StructType::createNamed("struct._category_t",
+ Int8PtrTy,
+ ClassnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ ProtocolListnfABIPtrTy,
+ PropertyListPtrTy,
+ NULL);
// New types for nonfragile abi messaging.
CodeGen::CodeGenTypes &Types = CGM.getTypes();
@@ -4576,26 +4568,25 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// SUPER_IMP messenger;
// SEL name;
// };
- SuperMessageRefTy = llvm::StructType::get(VMContext, ImpnfABITy,
- SelectorPtrTy,
- NULL);
- CGM.getModule().addTypeName("struct._super_message_ref_t", SuperMessageRefTy);
+ SuperMessageRefTy =
+ llvm::StructType::createNamed("struct._super_message_ref_t",
+ ImpnfABITy, SelectorPtrTy, NULL);
// SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
SuperMessageRefPtrTy = llvm::PointerType::getUnqual(SuperMessageRefTy);
-
+
// struct objc_typeinfo {
// const void** vtable; // objc_ehtype_vtable + 2
// const char* name; // c++ typeinfo string
// Class cls;
// };
- EHTypeTy = llvm::StructType::get(VMContext,
- llvm::PointerType::getUnqual(Int8PtrTy),
- Int8PtrTy,
- ClassnfABIPtrTy,
- NULL);
- CGM.getModule().addTypeName("struct._objc_typeinfo", EHTypeTy);
+ EHTypeTy =
+ llvm::StructType::createNamed("struct._objc_typeinfo",
+ llvm::PointerType::getUnqual(Int8PtrTy),
+ Int8PtrTy,
+ ClassnfABIPtrTy,
+ NULL);
EHTypePtrTy = llvm::PointerType::getUnqual(EHTypeTy);
}
@@ -4743,7 +4734,12 @@ enum MetaDataDlags {
CLS_META = 0x1,
CLS_ROOT = 0x2,
OBJC2_CLS_HIDDEN = 0x10,
- CLS_EXCEPTION = 0x20
+ CLS_EXCEPTION = 0x20,
+
+ /// (Obsolete) ARC-specific: this class has a .release_ivars method
+ CLS_HAS_IVAR_RELEASER = 0x40,
+ /// class was compiled with -fobjc-arr
+ CLS_COMPILED_BY_ARC = 0x80 // (1<<7)
};
/// BuildClassRoTInitializer - generate meta-data for:
/// struct _class_ro_t {
@@ -4767,6 +4763,10 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
const ObjCImplementationDecl *ID) {
std::string ClassName = ID->getNameAsString();
std::vector<llvm::Constant*> Values(10); // 11 for 64bit targets!
+
+ if (CGM.getLangOptions().ObjCAutoRefCount)
+ flags |= CLS_COMPILED_BY_ARC;
+
Values[ 0] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
Values[ 1] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceStart);
Values[ 2] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceSize);
@@ -4936,7 +4936,7 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
ID->getClassInterface()->getVisibility() == HiddenVisibility;
if (classIsHidden)
flags |= OBJC2_CLS_HIDDEN;
- if (ID->getNumIvarInitializers())
+ if (ID->hasCXXStructors())
flags |= eClassFlags_ABI2_HasCXXStructors;
if (!ID->getClassInterface()->getSuperClass()) {
// class is root
@@ -4972,7 +4972,7 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
flags = CLS;
if (classIsHidden)
flags |= OBJC2_CLS_HIDDEN;
- if (ID->getNumIvarInitializers())
+ if (ID->hasCXXStructors())
flags |= eClassFlags_ABI2_HasCXXStructors;
if (hasObjCExceptionAttribute(CGM.getContext(), ID->getClassInterface()))
@@ -5174,7 +5174,7 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitMethodList(llvm::Twine Name,
if (Methods.empty())
return llvm::Constant::getNullValue(ObjCTypes.MethodListnfABIPtrTy);
- std::vector<llvm::Constant*> Values(3);
+ llvm::Constant *Values[3];
// sizeof(struct _objc_method)
unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.MethodTy);
Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
@@ -5183,19 +5183,15 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitMethodList(llvm::Twine Name,
llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
Methods.size());
Values[2] = llvm::ConstantArray::get(AT, Methods);
- llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
- llvm::GlobalValue::InternalLinkage,
- Init,
- Name);
- GV->setAlignment(
- CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ llvm::GlobalValue::InternalLinkage, Init, Name);
+ GV->setAlignment(CGM.getTargetData().getABITypeAlignment(Init->getType()));
GV->setSection(Section);
CGM.AddUsedGlobal(GV);
- return llvm::ConstantExpr::getBitCast(GV,
- ObjCTypes.MethodListnfABIPtrTy);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListnfABIPtrTy);
}
/// ObjCIvarOffsetVariable - Returns the ivar offset variable for
@@ -5262,17 +5258,14 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
std::vector<llvm::Constant*> Ivars, Ivar(5);
- const ObjCInterfaceDecl *OID = ID->getClassInterface();
+ ObjCInterfaceDecl *OID =
+ const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
assert(OID && "CGObjCNonFragileABIMac::EmitIvarList - null interface");
// FIXME. Consolidate this with similar code in GenerateClass.
- // Collect declared and synthesized ivars in a small vector.
- llvm::SmallVector<ObjCIvarDecl*, 16> OIvars;
- CGM.getContext().ShallowCollectObjCIvars(OID, OIvars);
-
- for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
- ObjCIvarDecl *IVD = OIvars[i];
+ for (ObjCIvarDecl *IVD = OID->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar()) {
// Ignore unnamed bit-fields.
if (!IVD->getDeclName())
continue;
@@ -5298,14 +5291,15 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
// Return null for empty list.
if (Ivars.empty())
return llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
- std::vector<llvm::Constant*> Values(3);
+
+ llvm::Constant *Values[3];
unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.IvarnfABITy);
Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarnfABITy,
Ivars.size());
Values[2] = llvm::ConstantArray::get(AT, Ivars);
- llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
const char *Prefix = "\01l_OBJC_$_INSTANCE_VARIABLES_";
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
@@ -5491,7 +5485,7 @@ CGObjCNonFragileABIMac::EmitProtocolList(llvm::Twine Name,
ProtocolRefs.push_back(llvm::Constant::getNullValue(
ObjCTypes.ProtocolnfABIPtrTy));
- std::vector<llvm::Constant*> Values(2);
+ llvm::Constant *Values[2];
Values[0] =
llvm::ConstantInt::get(ObjCTypes.LongTy, ProtocolRefs.size() - 1);
Values[1] =
@@ -5500,11 +5494,10 @@ CGObjCNonFragileABIMac::EmitProtocolList(llvm::Twine Name,
ProtocolRefs.size()),
ProtocolRefs);
- llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
llvm::GlobalValue::InternalLinkage,
- Init,
- Name);
+ Init, Name);
GV->setSection("__DATA, __objc_const");
GV->setAlignment(
CGM.getTargetData().getABITypeAlignment(Init->getType()));
@@ -5654,8 +5647,7 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
if (!messageRef) {
// Build the message ref structure.
llvm::Constant *values[] = { fn, GetMethodVarName(selector) };
- llvm::Constant *init =
- llvm::ConstantStruct::get(VMContext, values, 2, false);
+ llvm::Constant *init = llvm::ConstantStruct::getAnon(values);
messageRef = new llvm::GlobalVariable(CGM.getModule(),
init->getType(),
/*constant*/ false,
@@ -5719,28 +5711,39 @@ CGObjCNonFragileABIMac::GetClassGlobal(const std::string &Name) {
return GV;
}
-llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CGBuilderTy &Builder,
- const ObjCInterfaceDecl *ID) {
- llvm::GlobalVariable *&Entry = ClassReferences[ID->getIdentifier()];
-
+llvm::Value *CGObjCNonFragileABIMac::EmitClassRefFromId(CGBuilderTy &Builder,
+ IdentifierInfo *II) {
+ llvm::GlobalVariable *&Entry = ClassReferences[II];
+
if (!Entry) {
- std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ std::string ClassName(getClassSymbolPrefix() + II->getName().str());
llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
Entry =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
- false, llvm::GlobalValue::InternalLinkage,
- ClassGV,
- "\01L_OBJC_CLASSLIST_REFERENCES_$_");
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+ false, llvm::GlobalValue::InternalLinkage,
+ ClassGV,
+ "\01L_OBJC_CLASSLIST_REFERENCES_$_");
Entry->setAlignment(
- CGM.getTargetData().getABITypeAlignment(
- ObjCTypes.ClassnfABIPtrTy));
+ CGM.getTargetData().getABITypeAlignment(
+ ObjCTypes.ClassnfABIPtrTy));
Entry->setSection("__DATA, __objc_classrefs, regular, no_dead_strip");
CGM.AddUsedGlobal(Entry);
}
-
+
return Builder.CreateLoad(Entry, "tmp");
}
+llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ return EmitClassRefFromId(Builder, ID->getIdentifier());
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitNSAutoreleasePoolClassRef(
+ CGBuilderTy &Builder) {
+ IdentifierInfo *II = &CGM.getContext().Idents.get("NSAutoreleasePool");
+ return EmitClassRefFromId(Builder, II);
+}
+
llvm::Value *
CGObjCNonFragileABIMac::EmitSuperClassRef(CGBuilderTy &Builder,
const ObjCInterfaceDecl *ID) {
@@ -6043,12 +6046,10 @@ void CGObjCNonFragileABIMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
llvm::Value *Exception = CGF.EmitScalarExpr(ThrowExpr);
Exception = CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy,
"tmp");
- llvm::Value *Args[] = { Exception };
- CGF.EmitCallOrInvoke(ObjCTypes.getExceptionThrowFn(),
- Args, Args+1)
+ CGF.EmitCallOrInvoke(ObjCTypes.getExceptionThrowFn(), Exception)
.setDoesNotReturn();
} else {
- CGF.EmitCallOrInvoke(ObjCTypes.getExceptionRethrowFn(), 0, 0)
+ CGF.EmitCallOrInvoke(ObjCTypes.getExceptionRethrowFn())
.setDoesNotReturn();
}
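[Illustration, not part of the patch.] Many hunks in this file swap the removed llvm::ConstantStruct::get(VMContext, Values, false) overload for llvm::ConstantStruct::getAnon, which infers the (unpacked) literal struct type from its elements. A minimal sketch with a made-up "count + array" blob shaped like the metadata lists emitted above:

#include "llvm/LLVMContext.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/ADT/ArrayRef.h"

static llvm::Constant *makeCountedList(llvm::LLVMContext &Ctx,
                                       llvm::Type *EltTy,
                                       llvm::ArrayRef<llvm::Constant*> Elts) {
  llvm::Constant *Values[2] = {
    llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Elts.size()),
    llvm::ConstantArray::get(llvm::ArrayType::get(EltTy, Elts.size()), Elts)
  };
  // No explicit StructType is needed: getAnon derives it from the constants.
  return llvm::ConstantStruct::getAnon(Values);
}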
diff --git a/lib/CodeGen/CGObjCRuntime.cpp b/lib/CodeGen/CGObjCRuntime.cpp
index 21150f1..09c8d0b 100644
--- a/lib/CodeGen/CGObjCRuntime.cpp
+++ b/lib/CodeGen/CGObjCRuntime.cpp
@@ -52,14 +52,14 @@ static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
// implemented. This should be fixed to get the information from the layout
// directly.
unsigned Index = 0;
- llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
- CGM.getContext().ShallowCollectObjCIvars(Container, Ivars);
- for (unsigned k = 0, e = Ivars.size(); k != e; ++k) {
- if (Ivar == Ivars[k])
+ ObjCInterfaceDecl *IDecl = const_cast<ObjCInterfaceDecl*>(Container);
+
+ for (ObjCIvarDecl *IVD = IDecl->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar()) {
+ if (Ivar == IVD)
break;
++Index;
}
- assert(Index != Ivars.size() && "Ivar is not inside container!");
assert(Index < RL->getFieldCount() && "Ivar is not inside record layout!");
return RL->getFieldOffset(Index);
@@ -134,7 +134,7 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
ContainingTypeSize, ContainingTypeAlign));
return LValue::MakeBitfield(V, *Info,
- IvarTy.getCVRQualifiers() | CVRQualifiers);
+ IvarTy.withCVRQualifiers(CVRQualifiers));
}
namespace {
@@ -151,13 +151,13 @@ namespace {
bool MightThrow;
llvm::Value *Fn;
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
if (!MightThrow) {
CGF.Builder.CreateCall(Fn)->setDoesNotThrow();
return;
}
- CGF.EmitCallOrInvoke(Fn, 0, 0);
+ CGF.EmitCallOrInvoke(Fn);
}
};
}
@@ -175,10 +175,8 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
CodeGenFunction::FinallyInfo FinallyInfo;
if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
- FinallyInfo = CGF.EnterFinallyBlock(Finally->getFinallyBody(),
- beginCatchFn,
- endCatchFn,
- exceptionRethrowFn);
+ FinallyInfo.enter(CGF, Finally->getFinallyBody(),
+ beginCatchFn, endCatchFn, exceptionRethrowFn);
llvm::SmallVector<CatchHandler, 8> Handlers;
@@ -266,9 +264,9 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
// Go back to the try-statement fallthrough.
CGF.Builder.restoreIP(SavedIP);
- // Pop out of the normal cleanup on the finally.
+ // Pop out of the finally.
if (S.getFinallyStmt())
- CGF.ExitFinallyBlock(FinallyInfo);
+ FinallyInfo.exit(CGF);
if (Cont.isValid())
CGF.EmitBlock(Cont.getBlock());
@@ -281,7 +279,7 @@ namespace {
CallSyncExit(llvm::Value *SyncExitFn, llvm::Value *SyncArg)
: SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}
- void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
CGF.Builder.CreateCall(SyncExitFn, SyncArg)->setDoesNotThrow();
}
};
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index 866d5d8..7accc70 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -205,7 +205,13 @@ public:
/// interface decl.
virtual llvm::Value *GetClass(CGBuilderTy &Builder,
const ObjCInterfaceDecl *OID) = 0;
-
+
+
+ virtual llvm::Value *EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder) {
+ assert(false &&"autoreleasepool unsupported in this ABI");
+ return 0;
+ }
+
/// EnumerationMutationFunction - Return the function that's called by the
/// compiler when a mutation is detected during foreach iteration.
virtual llvm::Constant *EnumerationMutationFunction() = 0;
diff --git a/lib/CodeGen/CGRTTI.cpp b/lib/CodeGen/CGRTTI.cpp
index c73b199..e564c70 100644
--- a/lib/CodeGen/CGRTTI.cpp
+++ b/lib/CodeGen/CGRTTI.cpp
@@ -658,9 +658,7 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
break;
}
- llvm::Constant *Init =
- llvm::ConstantStruct::get(VMContext, &Fields[0], Fields.size(),
- /*Packed=*/false);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
diff --git a/lib/CodeGen/CGRecordLayout.h b/lib/CodeGen/CGRecordLayout.h
index 6d9fc05..8a45029 100644
--- a/lib/CodeGen/CGRecordLayout.h
+++ b/lib/CodeGen/CGRecordLayout.h
@@ -176,11 +176,11 @@ class CGRecordLayout {
private:
/// The LLVM type corresponding to this record layout; used when
/// laying it out as a complete object.
- llvm::PATypeHolder CompleteObjectType;
+ llvm::StructType *CompleteObjectType;
/// The LLVM type for the non-virtual part of this record layout;
/// used when laying it out as a base subobject.
- llvm::PATypeHolder BaseSubobjectType;
+ llvm::StructType *BaseSubobjectType;
/// Map from (non-bit-field) struct field to the corresponding llvm struct
/// type field no. This info is populated by record builder.
@@ -208,8 +208,8 @@ private:
bool IsZeroInitializableAsBase : 1;
public:
- CGRecordLayout(const llvm::StructType *CompleteObjectType,
- const llvm::StructType *BaseSubobjectType,
+ CGRecordLayout(llvm::StructType *CompleteObjectType,
+ llvm::StructType *BaseSubobjectType,
bool IsZeroInitializable,
bool IsZeroInitializableAsBase)
: CompleteObjectType(CompleteObjectType),
@@ -219,14 +219,14 @@ public:
/// \brief Return the "complete object" LLVM type associated with
/// this record.
- const llvm::StructType *getLLVMType() const {
- return cast<llvm::StructType>(CompleteObjectType.get());
+ llvm::StructType *getLLVMType() const {
+ return CompleteObjectType;
}
/// \brief Return the "base subobject" LLVM type associated with
/// this record.
- const llvm::StructType *getBaseSubobjectLLVMType() const {
- return cast<llvm::StructType>(BaseSubobjectType.get());
+ llvm::StructType *getBaseSubobjectLLVMType() const {
+ return BaseSubobjectType;
}
/// \brief Check whether this struct can be C++ zero-initialized
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 0d72f85..2b07baf 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "llvm/DerivedTypes.h"
@@ -34,7 +35,7 @@ class CGRecordLayoutBuilder {
public:
/// FieldTypes - Holds the LLVM types that the struct is created from.
///
- llvm::SmallVector<const llvm::Type *, 16> FieldTypes;
+ llvm::SmallVector<llvm::Type *, 16> FieldTypes;
/// BaseSubobjectType - Holds the LLVM type for the non-virtual part
/// of the struct. For example, consider:
@@ -51,7 +52,7 @@ public:
///
/// This only gets initialized if the base subobject type is
/// different from the complete-object type.
- const llvm::StructType *BaseSubobjectType;
+ llvm::StructType *BaseSubobjectType;
/// FieldInfo - Holds a field and its corresponding LLVM field number.
llvm::DenseMap<const FieldDecl *, unsigned> Fields;
@@ -108,8 +109,8 @@ private:
/// LayoutUnionField - Will lay out a field in a union and return the type
/// that the field will have.
- const llvm::Type *LayoutUnionField(const FieldDecl *Field,
- const ASTRecordLayout &Layout);
+ llvm::Type *LayoutUnionField(const FieldDecl *Field,
+ const ASTRecordLayout &Layout);
/// LayoutUnion - Will layout a union RecordDecl.
void LayoutUnion(const RecordDecl *D);
@@ -150,7 +151,7 @@ private:
void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);
/// AppendField - Appends a field with the given offset and type.
- void AppendField(CharUnits fieldOffset, const llvm::Type *FieldTy);
+ void AppendField(CharUnits fieldOffset, llvm::Type *FieldTy);
/// AppendPadding - Appends enough padding bytes so that the total
/// struct size is a multiple of the field alignment.
@@ -164,7 +165,7 @@ private:
/// getByteArrayType - Returns a byte array type with the given number of
/// elements.
- const llvm::Type *getByteArrayType(CharUnits NumBytes);
+ llvm::Type *getByteArrayType(CharUnits NumBytes);
/// AppendBytes - Append a given number of bytes to the record.
void AppendBytes(CharUnits numBytes);
@@ -229,7 +230,7 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
uint64_t FieldSize,
uint64_t ContainingTypeSizeInBits,
unsigned ContainingTypeAlign) {
- const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(FD->getType());
+ const llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
CharUnits TypeSizeInBytes =
CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);
@@ -268,6 +269,18 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
unsigned AccessedTargetBits = 0; // The number of target bits accessed.
unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.
+ // If requested, widen the initial bit-field access to be register sized. The
+ // theory is that this is most likely to allow multiple accesses into the same
+ // structure to be coalesced, and that the backend should be smart enough to
+ // narrow the store if no coalescing is ever done.
+ //
+ // The subsequent code handles aligning these accesses to common boundaries
+ // and guarantees that we do not access past the end of the structure.
+ if (Types.getCodeGenOpts().UseRegisterSizedBitfieldAccess) {
+ if (AccessWidth < Types.getTarget().getRegisterWidth())
+ AccessWidth = Types.getTarget().getRegisterWidth();
+ }
+
// Round down from the field offset to find the first access position that is
// at an aligned offset of the initial access type.
uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);
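[Worked example with hypothetical numbers, not part of the patch.] With register-sized widening on a 32-bit target, a bit-field whose first bit sits at offset 37 starts its initial access at bit 32; the rounding is exactly the expression above:

#include <cstdint>
#include <cassert>

int main() {
  uint64_t FieldOffset = 37;  // bit offset of the bit-field
  uint64_t AccessWidth = 32;  // widened to the register width
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  assert(AccessStart == 32);  // first access begins on a 32-bit boundary
  return 0;
}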
@@ -427,7 +440,7 @@ bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
CharUnits fieldOffsetInBytes
= Types.getContext().toCharUnitsFromBits(fieldOffset);
- const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
+ llvm::Type *Ty = Types.ConvertTypeForMem(D->getType());
CharUnits typeAlignment = getTypeAlignment(Ty);
// If the type alignment is larger then the struct alignment, we must use
@@ -475,7 +488,7 @@ bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
return true;
}
-const llvm::Type *
+llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
const ASTRecordLayout &Layout) {
if (Field->isBitField()) {
@@ -486,7 +499,7 @@ CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
if (FieldSize == 0)
return 0;
- const llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
+ llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
CharUnits NumBytesToAppend = Types.getContext().toCharUnitsFromBits(
llvm::RoundUpToAlignment(FieldSize,
Types.getContext().Target.getCharAlign()));
@@ -502,7 +515,7 @@ CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
// This is a regular union field.
Fields[Field] = 0;
- return Types.ConvertTypeForMemRecursive(Field->getType());
+ return Types.ConvertTypeForMem(Field->getType());
}
void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
@@ -510,7 +523,7 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);
- const llvm::Type *unionType = 0;
+ llvm::Type *unionType = 0;
CharUnits unionSize = CharUnits::Zero();
CharUnits unionAlign = CharUnits::Zero();
@@ -521,7 +534,7 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
assert(layout.getFieldOffset(fieldNo) == 0 &&
"Union field offset did not start at the beginning of record!");
- const llvm::Type *fieldType = LayoutUnionField(*field, layout);
+ llvm::Type *fieldType = LayoutUnionField(*field, layout);
if (!fieldType)
continue;
@@ -586,10 +599,8 @@ void CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
// approximation, which is to use the base subobject type if it
// has the same LLVM storage size as the nvsize.
- const llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
+ llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
AppendField(baseOffset, subobjectType);
-
- Types.addBaseSubobjectTypeName(base, baseLayout);
}
void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
@@ -723,13 +734,14 @@ CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
FieldTypes.push_back(getByteArrayType(NumBytes));
}
- BaseSubobjectType = llvm::StructType::get(Types.getLLVMContext(),
- FieldTypes, Packed);
+
+ BaseSubobjectType = llvm::StructType::createNamed(Types.getLLVMContext(), "",
+ FieldTypes, Packed);
+ Types.addRecordTypeName(RD, BaseSubobjectType, ".base");
- if (needsPadding) {
- // Pull the padding back off.
+ // Pull the padding back off.
+ if (needsPadding)
FieldTypes.pop_back();
- }
return true;
}
@@ -806,7 +818,7 @@ void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
}
void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
- const llvm::Type *fieldType) {
+ llvm::Type *fieldType) {
CharUnits fieldSize =
CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(fieldType));
@@ -852,10 +864,10 @@ bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
return true;
}
-const llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
+llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");
- const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
+ llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
if (numBytes > CharUnits::One())
Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());
@@ -911,17 +923,16 @@ void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
}
}
-CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
+CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
+ llvm::StructType *Ty) {
CGRecordLayoutBuilder Builder(*this);
Builder.Layout(D);
- const llvm::StructType *Ty = llvm::StructType::get(getLLVMContext(),
- Builder.FieldTypes,
- Builder.Packed);
+ Ty->setBody(Builder.FieldTypes, Builder.Packed);
// If we're in C++, compute the base subobject type.
- const llvm::StructType *BaseTy = 0;
+ llvm::StructType *BaseTy = 0;
if (isa<CXXRecordDecl>(D)) {
BaseTy = Builder.BaseSubobjectType;
if (!BaseTy) BaseTy = Ty;
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index a982621..07bddb7 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -151,6 +151,9 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
case Stmt::ObjCForCollectionStmtClass:
EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
break;
+ case Stmt::ObjCAutoreleasePoolStmtClass:
+ EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
+ break;
case Stmt::CXXTryStmtClass:
EmitCXXTryStmt(cast<CXXTryStmt>(*S));
@@ -764,7 +767,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
} else if (RV->getType()->isAnyComplexType()) {
EmitComplexExprIntoAddr(RV, ReturnValue, false);
} else {
- EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, false, true));
+ EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Qualifiers(), true));
}
EmitBranchThroughCleanup(ReturnBlock);
@@ -1275,11 +1278,16 @@ AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
return Constraint;
llvm::StringRef Register = Attr->getLabel();
assert(Target.isValidGCCRegisterName(Register));
- // FIXME: We should check which registers are compatible with "r" or "x".
- if (Constraint != "r" && Constraint != "x") {
+ // We're using validateOutputConstraint here because we only care if
+ // this is a register constraint.
+ TargetInfo::ConstraintInfo Info(Constraint, "");
+ if (Target.validateOutputConstraint(Info) &&
+ !Info.allowsRegister()) {
CGM.ErrorUnsupported(&Stmt, "__asm__");
return Constraint;
}
+ // Canonicalize the register here before returning it.
+ Register = Target.getNormalizedGCCRegisterName(Register);
return "{" + Register.str() + "}";
}
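[Source-level illustration, hypothetical register name.] AddVariableConstraints handles output operands bound to a specific register through a GCC register-asm variable; after this change any register-class constraint (not just "r" or "x") is accepted, and the normalized register name is returned in the "{eax}" form:

// A register-asm variable used as an inline-asm output operand.  The "r"
// constraint is recognized as a register constraint and rewritten to
// "{eax}" by the code above.
void answer(void) {
  register int result asm("eax");
  __asm__("movl $42, %0" : "=r"(result));
  (void)result;
}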
@@ -1291,7 +1299,7 @@ CodeGenFunction::EmitAsmInputLValue(const AsmStmt &S,
llvm::Value *Arg;
if (Info.allowsRegister() || !Info.allowsMemory()) {
if (!CodeGenFunction::hasAggregateLLVMType(InputType)) {
- Arg = EmitLoadOfLValue(InputValue, InputType).getScalarVal();
+ Arg = EmitLoadOfLValue(InputValue).getScalarVal();
} else {
const llvm::Type *Ty = ConvertType(InputType);
uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
@@ -1400,15 +1408,15 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
std::vector<LValue> ResultRegDests;
std::vector<QualType> ResultRegQualTys;
- std::vector<const llvm::Type *> ResultRegTypes;
- std::vector<const llvm::Type *> ResultTruncRegTypes;
- std::vector<const llvm::Type*> ArgTypes;
+ std::vector<llvm::Type *> ResultRegTypes;
+ std::vector<llvm::Type *> ResultTruncRegTypes;
+ std::vector<llvm::Type*> ArgTypes;
std::vector<llvm::Value*> Args;
// Keep track of inout constraints.
std::string InOutConstraints;
std::vector<llvm::Value*> InOutArgs;
- std::vector<const llvm::Type*> InOutArgTypes;
+ std::vector<llvm::Type*> InOutArgTypes;
for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
@@ -1457,7 +1465,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
ResultRegTypes.back() = ConvertType(InputTy);
}
}
- if (const llvm::Type* AdjTy =
+ if (llvm::Type* AdjTy =
getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
ResultRegTypes.back()))
ResultRegTypes.back() = AdjTy;
@@ -1550,6 +1558,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
llvm::StringRef Clobber = S.getClobber(i)->getString();
+ if (Clobber != "memory" && Clobber != "cc")
Clobber = Target.getNormalizedGCCRegisterName(Clobber);
if (i != 0 || NumConstraints != 0)
@@ -1582,7 +1591,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, AsmString, Constraints,
S.isVolatile() || S.getNumOutputs() == 0);
- llvm::CallInst *Result = Builder.CreateCall(IA, Args.begin(), Args.end());
+ llvm::CallInst *Result = Builder.CreateCall(IA, Args);
Result->addAttribute(~0, llvm::Attribute::NoUnwind);
// Slap the source location of the inline asm into a !srcloc metadata on the
@@ -1629,7 +1638,6 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
}
}
- EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i],
- ResultRegQualTys[i]);
+ EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]);
}
}
diff --git a/lib/CodeGen/CGTemporaries.cpp b/lib/CodeGen/CGTemporaries.cpp
index 3b4c509..0387dae 100644
--- a/lib/CodeGen/CGTemporaries.cpp
+++ b/lib/CodeGen/CGTemporaries.cpp
@@ -16,9 +16,12 @@ using namespace clang;
using namespace CodeGen;
namespace {
- struct DestroyTemporary {
- static void Emit(CodeGenFunction &CGF, bool forEH,
- const CXXDestructorDecl *dtor, llvm::Value *addr) {
+ struct DestroyTemporary : EHScopeStack::Cleanup {
+ const CXXDestructorDecl *dtor;
+ llvm::Value *addr;
+ DestroyTemporary(const CXXDestructorDecl *dtor, llvm::Value *addr)
+ : dtor(dtor), addr(addr) {}
+ void Emit(CodeGenFunction &CGF, Flags flags) {
CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*ForVirtualBase=*/false,
addr);
}
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
index aefc41e..cec02cd 100644
--- a/lib/CodeGen/CGVTT.cpp
+++ b/lib/CodeGen/CGVTT.cpp
@@ -390,8 +390,7 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
llvm::Constant *Init =
- llvm::ConstantArray::get(ArrayType, Builder.getVTTComponents().data(),
- Builder.getVTTComponents().size());
+ llvm::ConstantArray::get(ArrayType, Builder.getVTTComponents());
VTT->setInitializer(Init);
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index 9ac5e67..c161b79 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -2937,7 +2937,8 @@ void CodeGenVTables::MaybeEmitThunkAvailableExternally(GlobalDecl GD,
// We can't emit thunks for member functions with incomplete types.
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
- if (CGM.getTypes().VerifyFuncTypeComplete(MD->getType().getTypePtr()))
+ if (!CGM.getTypes().isFuncTypeConvertible(
+ cast<FunctionType>(MD->getType().getTypePtr())))
return;
EmitThunk(GD, Thunk, /*UseAvailableExternallyLinkage=*/true);
@@ -3165,7 +3166,7 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
}
llvm::ArrayType *ArrayType = llvm::ArrayType::get(Int8PtrTy, NumComponents);
- return llvm::ConstantArray::get(ArrayType, Inits.data(), Inits.size());
+ return llvm::ConstantArray::get(ArrayType, Inits);
}
llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
diff --git a/lib/CodeGen/CGVTables.h b/lib/CodeGen/CGVTables.h
index e830e9a..eff6e56 100644
--- a/lib/CodeGen/CGVTables.h
+++ b/lib/CodeGen/CGVTables.h
@@ -18,7 +18,7 @@
#include "llvm/GlobalVariable.h"
#include "clang/Basic/ABI.h"
#include "clang/AST/CharUnits.h"
-#include "GlobalDecl.h"
+#include "clang/AST/GlobalDecl.h"
namespace clang {
class CXXRecordDecl;
diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h
index 7f77d55..4d0b841 100644
--- a/lib/CodeGen/CGValue.h
+++ b/lib/CodeGen/CGValue.h
@@ -101,8 +101,6 @@ public:
/// bitfields, this is not a simple LLVM pointer, it may be a pointer plus a
/// bitrange.
class LValue {
- // FIXME: alignment?
-
enum {
Simple, // This is a normal l-value, use getAddress().
VectorElt, // This is a vector element l-value (V[i]), use getVector*
@@ -128,6 +126,8 @@ class LValue {
const ObjCPropertyRefExpr *PropertyRefExpr;
};
+ QualType Type;
+
// 'const' is unused here
Qualifiers Quals;
@@ -156,8 +156,9 @@ class LValue {
llvm::MDNode *TBAAInfo;
private:
- void Initialize(Qualifiers Quals, unsigned Alignment = 0,
+ void Initialize(QualType Type, Qualifiers Quals, unsigned Alignment = 0,
llvm::MDNode *TBAAInfo = 0) {
+ this->Type = Type;
this->Quals = Quals;
this->Alignment = Alignment;
assert(this->Alignment == Alignment && "Alignment exceeds allowed max!");
@@ -182,6 +183,12 @@ public:
return Quals.getCVRQualifiers() & ~Qualifiers::Const;
}
+ QualType getType() const { return Type; }
+
+ Qualifiers::ObjCLifetime getObjCLifetime() const {
+ return Quals.getObjCLifetime();
+ }
+
bool isObjCIvar() const { return Ivar; }
void setObjCIvar(bool Value) { Ivar = Value; }
@@ -203,6 +210,10 @@ public:
bool isObjCStrong() const {
return Quals.getObjCGCAttr() == Qualifiers::Strong;
}
+
+ bool isVolatile() const {
+ return Quals.hasVolatile();
+ }
Expr *getBaseIvarExp() const { return BaseIvarExp; }
void setBaseIvarExp(Expr *V) { BaseIvarExp = V; }
@@ -219,6 +230,10 @@ public:
// simple lvalue
llvm::Value *getAddress() const { assert(isSimple()); return V; }
+ void setAddress(llvm::Value *address) {
+ assert(isSimple());
+ V = address;
+ }
// vector elt lvalue
llvm::Value *getVectorAddr() const { assert(isVectorElt()); return V; }
@@ -251,36 +266,36 @@ public:
return PropertyRefExpr;
}
- static LValue MakeAddr(llvm::Value *V, QualType T, unsigned Alignment,
- ASTContext &Context,
+ static LValue MakeAddr(llvm::Value *address, QualType type,
+ unsigned alignment, ASTContext &Context,
llvm::MDNode *TBAAInfo = 0) {
- Qualifiers Quals = T.getQualifiers();
- Quals.setObjCGCAttr(Context.getObjCGCAttrKind(T));
+ Qualifiers qs = type.getQualifiers();
+ qs.setObjCGCAttr(Context.getObjCGCAttrKind(type));
LValue R;
R.LVType = Simple;
- R.V = V;
- R.Initialize(Quals, Alignment, TBAAInfo);
+ R.V = address;
+ R.Initialize(type, qs, alignment, TBAAInfo);
return R;
}
static LValue MakeVectorElt(llvm::Value *Vec, llvm::Value *Idx,
- unsigned CVR) {
+ QualType type) {
LValue R;
R.LVType = VectorElt;
R.V = Vec;
R.VectorIdx = Idx;
- R.Initialize(Qualifiers::fromCVRMask(CVR));
+ R.Initialize(type, type.getQualifiers());
return R;
}
static LValue MakeExtVectorElt(llvm::Value *Vec, llvm::Constant *Elts,
- unsigned CVR) {
+ QualType type) {
LValue R;
R.LVType = ExtVectorElt;
R.V = Vec;
R.VectorElts = Elts;
- R.Initialize(Qualifiers::fromCVRMask(CVR));
+ R.Initialize(type, type.getQualifiers());
return R;
}
@@ -290,13 +305,14 @@ public:
/// bit-field.
/// \param Info - The information describing how to perform the bit-field
/// access.
- static LValue MakeBitfield(llvm::Value *BaseValue, const CGBitFieldInfo &Info,
- unsigned CVR) {
+ static LValue MakeBitfield(llvm::Value *BaseValue,
+ const CGBitFieldInfo &Info,
+ QualType type) {
LValue R;
R.LVType = BitField;
R.V = BaseValue;
R.BitFieldInfo = &Info;
- R.Initialize(Qualifiers::fromCVRMask(CVR));
+ R.Initialize(type, type.getQualifiers());
return R;
}
@@ -309,7 +325,7 @@ public:
R.LVType = PropertyRef;
R.V = Base;
R.PropertyRefExpr = E;
- R.Initialize(Qualifiers());
+ R.Initialize(QualType(), Qualifiers());
return R;
}
};
@@ -318,9 +334,11 @@ public:
class AggValueSlot {
/// The address.
llvm::Value *Addr;
+
+ // Qualifiers
+ Qualifiers Quals;
// Associated flags.
- bool VolatileFlag : 1;
bool LifetimeFlag : 1;
bool RequiresGCollection : 1;
@@ -335,25 +353,31 @@ public:
static AggValueSlot ignored() {
AggValueSlot AV;
AV.Addr = 0;
- AV.VolatileFlag = AV.LifetimeFlag = AV.RequiresGCollection = AV.IsZeroed =0;
+ AV.Quals = Qualifiers();
+ AV.LifetimeFlag = AV.RequiresGCollection = AV.IsZeroed =0;
return AV;
}
/// forAddr - Make a slot for an aggregate value.
///
/// \param Volatile - true if the slot should be volatile-initialized
+ ///
+ /// \param Quals - The qualifiers that dictate how the slot
+ /// should be initialized. Only 'volatile' and the Objective-C
+ /// lifetime qualifiers matter.
+ ///
/// \param LifetimeExternallyManaged - true if the slot's lifetime
/// is being externally managed; false if a destructor should be
/// registered for any temporaries evaluated into the slot
/// \param RequiresGCollection - true if the slot is located
/// somewhere that ObjC GC calls should be emitted for
- static AggValueSlot forAddr(llvm::Value *Addr, bool Volatile,
+ static AggValueSlot forAddr(llvm::Value *Addr, Qualifiers Quals,
bool LifetimeExternallyManaged,
bool RequiresGCollection = false,
bool IsZeroed = false) {
AggValueSlot AV;
AV.Addr = Addr;
- AV.VolatileFlag = Volatile;
+ AV.Quals = Quals;
AV.LifetimeFlag = LifetimeExternallyManaged;
AV.RequiresGCollection = RequiresGCollection;
AV.IsZeroed = IsZeroed;
@@ -361,9 +385,10 @@ public:
}
static AggValueSlot forLValue(LValue LV, bool LifetimeExternallyManaged,
- bool RequiresGCollection = false) {
- return forAddr(LV.getAddress(), LV.isVolatileQualified(),
- LifetimeExternallyManaged, RequiresGCollection);
+ bool RequiresGCollection = false,
+ bool IsZeroed = false) {
+ return forAddr(LV.getAddress(), LV.getQuals(),
+ LifetimeExternallyManaged, RequiresGCollection, IsZeroed);
}
bool isLifetimeExternallyManaged() const {
@@ -373,8 +398,14 @@ public:
LifetimeFlag = Managed;
}
+ Qualifiers getQualifiers() const { return Quals; }
+
bool isVolatile() const {
- return VolatileFlag;
+ return Quals.hasVolatile();
+ }
+
+ Qualifiers::ObjCLifetime getObjCLifetime() const {
+ return Quals.getObjCLifetime();
}
bool requiresGCollection() const {
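A standalone sketch (not the clang types) of what carrying Qualifiers instead of a lone volatile bit buys the slot: the volatile flag and the Objective-C lifetime are both views of the same stored qualifier set.

    #include <cassert>

    enum ObjCLifetime { OCL_None, OCL_Strong, OCL_Weak };

    struct Quals {
      bool Volatile;
      ObjCLifetime Lifetime;
    };

    struct Slot {
      void *Addr;
      Quals Q;
      static Slot forAddr(void *addr, Quals q) { return Slot{addr, q}; }
      bool isVolatile() const { return Q.Volatile; }              // was a separate bit
      ObjCLifetime getObjCLifetime() const { return Q.Lifetime; } // newly recoverable
    };

    int main() {
      int storage = 0;
      Slot s = Slot::forAddr(&storage, Quals{true, OCL_Strong});
      assert(s.isVolatile() && s.getObjCLifetime() == OCL_Strong);
      return 0;
    }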
diff --git a/lib/CodeGen/CodeGenAction.cpp b/lib/CodeGen/CodeGenAction.cpp
index 62fa1f9..263e01e 100644
--- a/lib/CodeGen/CodeGenAction.cpp
+++ b/lib/CodeGen/CodeGenAction.cpp
@@ -34,6 +34,7 @@ namespace clang {
BackendAction Action;
const CodeGenOptions &CodeGenOpts;
const TargetOptions &TargetOpts;
+ const LangOptions &LangOpts;
llvm::raw_ostream *AsmOutStream;
ASTContext *Context;
@@ -46,13 +47,16 @@ namespace clang {
public:
BackendConsumer(BackendAction action, Diagnostic &_Diags,
const CodeGenOptions &compopts,
- const TargetOptions &targetopts, bool TimePasses,
+ const TargetOptions &targetopts,
+ const LangOptions &langopts,
+ bool TimePasses,
const std::string &infile, llvm::raw_ostream *OS,
LLVMContext &C) :
Diags(_Diags),
Action(action),
CodeGenOpts(compopts),
TargetOpts(targetopts),
+ LangOpts(langopts),
AsmOutStream(OS),
LLVMIRGeneration("LLVM IR Generation Time"),
Gen(CreateLLVMCodeGen(Diags, infile, compopts, C)) {
@@ -126,7 +130,7 @@ namespace clang {
void *OldContext = Ctx.getInlineAsmDiagnosticContext();
Ctx.setInlineAsmDiagnosticHandler(InlineAsmDiagHandler, this);
- EmitBackendOutput(Diags, CodeGenOpts, TargetOpts,
+ EmitBackendOutput(Diags, CodeGenOpts, TargetOpts, LangOpts,
TheModule.get(), Action, AsmOutStream);
Ctx.setInlineAsmDiagnosticHandler(OldHandler, OldContext);
@@ -285,6 +289,7 @@ ASTConsumer *CodeGenAction::CreateASTConsumer(CompilerInstance &CI,
BEConsumer =
new BackendConsumer(BA, CI.getDiagnostics(),
CI.getCodeGenOpts(), CI.getTargetOpts(),
+ CI.getLangOpts(),
CI.getFrontendOpts().ShowTimers, InFile, OS.take(),
*VMContext);
return BEConsumer;
@@ -332,7 +337,8 @@ void CodeGenAction::ExecuteAction() {
}
EmitBackendOutput(CI.getDiagnostics(), CI.getCodeGenOpts(),
- CI.getTargetOpts(), TheModule.get(),
+ CI.getTargetOpts(), CI.getLangOpts(),
+ TheModule.get(),
BA, OS);
return;
}
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index 150cb69..702897a 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -31,7 +31,7 @@ using namespace CodeGen;
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
: CodeGenTypeCache(cgm), CGM(cgm),
Target(CGM.getContext().Target), Builder(cgm.getModule().getContext()),
- BlockInfo(0), BlockPointer(0),
+ AutoreleaseResult(false), BlockInfo(0), BlockPointer(0),
NormalCleanupDest(0), EHCleanupDest(0), NextCleanupDestIndex(1),
ExceptionSlot(0), EHSelectorSlot(0),
DebugInfo(0), DisableDebugInfo(false), DidCallStackSave(false),
@@ -45,11 +45,11 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
}
-const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
+llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
return CGM.getTypes().ConvertTypeForMem(T);
}
-const llvm::Type *CodeGenFunction::ConvertType(QualType T) {
+llvm::Type *CodeGenFunction::ConvertType(QualType T) {
return CGM.getTypes().ConvertType(T);
}
@@ -142,6 +142,13 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
assert(BreakContinueStack.empty() &&
"mismatched push/pop in break/continue stack!");
+ // Pop any cleanups that might have been associated with the
+ // parameters. Do this in whatever block we're currently in; it's
+ // important to do this before we enter the return block or return
+ // edges will be *really* confused.
+ if (EHStack.stable_begin() != PrologueCleanupDepth)
+ PopCleanupBlocks(PrologueCleanupDepth);
+
// Emit function epilog (to return).
EmitReturnBlock();
@@ -206,15 +213,15 @@ bool CodeGenFunction::ShouldInstrumentFunction() {
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
// void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
- const llvm::PointerType *PointerTy = Int8PtrTy;
- const llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
+ llvm::PointerType *PointerTy = Int8PtrTy;
+ llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
const llvm::FunctionType *FunctionTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
ProfileFuncArgs, false);
llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
llvm::CallInst *CallSite = Builder.CreateCall(
- CGM.getIntrinsic(llvm::Intrinsic::returnaddress, 0, 0),
+ CGM.getIntrinsic(llvm::Intrinsic::returnaddress),
llvm::ConstantInt::get(Int32Ty, 0),
"callsite");
@@ -311,9 +318,19 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
ReturnValue = CurFn->arg_begin();
} else {
ReturnValue = CreateIRTemp(RetTy, "retval");
+
+ // Tell the epilog emitter to autorelease the result. We do this
+ // now so that various specialized functions can suppress it
+ // during their IR-generation.
+ if (getLangOptions().ObjCAutoRefCount &&
+ !CurFnInfo->isReturnsRetained() &&
+ RetTy->isObjCRetainableType())
+ AutoreleaseResult = true;
}
EmitStartEHSpec(CurCodeDecl);
+
+ PrologueCleanupDepth = EHStack.stable_begin();
EmitFunctionProlog(*CurFnInfo, CurFn, Args);
if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance())
@@ -326,7 +343,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
QualType Ty = (*i)->getType();
if (Ty->isVariablyModifiedType())
- EmitVLASize(Ty);
+ EmitVariablyModifiedType(Ty);
}
}
@@ -692,13 +709,20 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
if (const VariableArrayType *vlaType =
dyn_cast_or_null<VariableArrayType>(
getContext().getAsArrayType(Ty))) {
- SizeVal = GetVLASize(vlaType);
+ QualType eltType;
+ llvm::Value *numElts;
+ llvm::tie(numElts, eltType) = getVLASize(vlaType);
+
+ SizeVal = numElts;
+ CharUnits eltSize = getContext().getTypeSizeInChars(eltType);
+ if (!eltSize.isOne())
+ SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
vla = vlaType;
} else {
return;
}
} else {
- SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
+ SizeVal = CGM.getSize(Size);
vla = 0;
}
@@ -761,60 +785,198 @@ llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
return IndirectBranch->getParent();
}
-llvm::Value *CodeGenFunction::GetVLASize(const VariableArrayType *VAT) {
- llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];
+/// Computes the length of an array in elements, as well as the base
+/// element type and a properly-typed first element pointer.
+llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
+ QualType &baseType,
+ llvm::Value *&addr) {
+ const ArrayType *arrayType = origArrayType;
+
+ // If it's a VLA, we have to load the stored size. Note that
+ // this is the number of elements in the VLA, not its size in bytes.
+ llvm::Value *numVLAElements = 0;
+ if (isa<VariableArrayType>(arrayType)) {
+ numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).first;
+
+ // Walk into all VLAs. This doesn't require changes to addr,
+ // which has type T* where T is the first non-VLA element type.
+ do {
+ QualType elementType = arrayType->getElementType();
+ arrayType = getContext().getAsArrayType(elementType);
+
+ // If we only have VLA components, 'addr' requires no adjustment.
+ if (!arrayType) {
+ baseType = elementType;
+ return numVLAElements;
+ }
+ } while (isa<VariableArrayType>(arrayType));
- assert(SizeEntry && "Did not emit size for type");
- return SizeEntry;
-}
+ // We get out here only if we find a constant array type
+ // inside the VLA.
+ }
-llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
- assert(Ty->isVariablyModifiedType() &&
- "Must pass variably modified type to EmitVLASizes!");
+ // We have some number of constant-length arrays, so addr should
+ // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
+ // down to the first element of addr.
+ llvm::SmallVector<llvm::Value*, 8> gepIndices;
- EnsureInsertPoint();
+ // GEP down to the array type.
+ llvm::ConstantInt *zero = Builder.getInt32(0);
+ gepIndices.push_back(zero);
+
+ // It's more efficient to calculate the count from the LLVM
+ // constant-length arrays than to re-evaluate the array bounds.
+ uint64_t countFromCLAs = 1;
+
+ const llvm::ArrayType *llvmArrayType =
+ cast<llvm::ArrayType>(
+ cast<llvm::PointerType>(addr->getType())->getElementType());
+ while (true) {
+ assert(isa<ConstantArrayType>(arrayType));
+ assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
+ == llvmArrayType->getNumElements());
+
+ gepIndices.push_back(zero);
+ countFromCLAs *= llvmArrayType->getNumElements();
+
+ llvmArrayType =
+ dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
+ if (!llvmArrayType) break;
+
+ arrayType = getContext().getAsArrayType(arrayType->getElementType());
+ assert(arrayType && "LLVM and Clang types are out-of-synch");
+ }
+
+ baseType = arrayType->getElementType();
+
+ // Create the actual GEP.
+ addr = Builder.CreateInBoundsGEP(addr, gepIndices.begin(),
+ gepIndices.end(), "array.begin");
- if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
- // unknown size indication requires no size computation.
- if (!VAT->getSizeExpr())
- return 0;
- llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];
+ llvm::Value *numElements
+ = llvm::ConstantInt::get(SizeTy, countFromCLAs);
- if (!SizeEntry) {
- const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ // If we had any VLA dimensions, factor them in.
+ if (numVLAElements)
+ numElements = Builder.CreateNUWMul(numVLAElements, numElements);
- // Get the element size;
- QualType ElemTy = VAT->getElementType();
- llvm::Value *ElemSize;
- if (ElemTy->isVariableArrayType())
- ElemSize = EmitVLASize(ElemTy);
- else
- ElemSize = llvm::ConstantInt::get(SizeTy,
- getContext().getTypeSizeInChars(ElemTy).getQuantity());
+ return numElements;
+}
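A worked example of the count computed above, assuming a hypothetical array typed float a[n][3][4]: the single VLA bound contributes n, the constant-length dimensions contribute 3*4, and the function returns their product while GEP'ing addr down to the first float.

    #include <cstdint>
    #include <iostream>

    int main() {
      uint64_t n = 7;                            // the VLA bound (numVLAElements)
      uint64_t countFromCLAs = 3 * 4;            // product of constant-length dims
      uint64_t numElements = n * countFromCLAs;  // NUW multiply in the real code
      // The base type reported is 'float'; addr is walked from
      // [3 x [4 x float]]* down to a pointer at the first element.
      std::cout << numElements << " float elements\n";  // prints "84 float elements"
      return 0;
    }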
+
+std::pair<llvm::Value*, QualType>
+CodeGenFunction::getVLASize(QualType type) {
+ const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
+ assert(vla && "type was not a variable array type!");
+ return getVLASize(vla);
+}
+
+std::pair<llvm::Value*, QualType>
+CodeGenFunction::getVLASize(const VariableArrayType *type) {
+ // The number of elements so far; always size_t.
+ llvm::Value *numElements = 0;
- llvm::Value *NumElements = EmitScalarExpr(VAT->getSizeExpr());
- NumElements = Builder.CreateIntCast(NumElements, SizeTy, false, "tmp");
+ QualType elementType;
+ do {
+ elementType = type->getElementType();
+ llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
+ assert(vlaSize && "no size for VLA!");
+ assert(vlaSize->getType() == SizeTy);
- SizeEntry = Builder.CreateMul(ElemSize, NumElements);
+ if (!numElements) {
+ numElements = vlaSize;
+ } else {
+ // It's undefined behavior if this wraps around, so mark it that way.
+ numElements = Builder.CreateNUWMul(numElements, vlaSize);
}
+ } while ((type = getContext().getAsVariableArrayType(elementType)));
- return SizeEntry;
- }
+ return std::pair<llvm::Value*,QualType>(numElements, elementType);
+}
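A standalone sketch of the contract with hypothetical bounds: for double a[n][m][k] the returned pair is the no-unsigned-wrap product of every VLA bound together with the innermost non-VLA element type.

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <utility>

    // What the loop above computes for 'double a[n][m][k]'.
    std::pair<uint64_t, std::string> getVLASize(uint64_t n, uint64_t m, uint64_t k) {
      return std::make_pair(n * m * k, std::string("double"));
    }

    int main() {
      std::pair<uint64_t, std::string> r = getVLASize(2, 3, 5);
      std::cout << r.first << " x " << r.second << "\n";  // prints "30 x double"
      return 0;
    }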
- if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
- EmitVLASize(AT->getElementType());
- return 0;
- }
+void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
+ assert(type->isVariablyModifiedType() &&
+ "Must pass variably modified type to EmitVLASizes!");
- if (const ParenType *PT = dyn_cast<ParenType>(Ty)) {
- EmitVLASize(PT->getInnerType());
- return 0;
- }
+ EnsureInsertPoint();
+
+ // We're going to walk down into the type and look for VLA
+ // expressions.
+ type = type.getCanonicalType();
+ do {
+ assert(type->isVariablyModifiedType());
+
+ const Type *ty = type.getTypePtr();
+ switch (ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("unexpected dependent or non-canonical type!");
+
+ // These types are never variably-modified.
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::Record:
+ case Type::Enum:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ llvm_unreachable("type class is never variably-modified!");
+
+ case Type::Pointer:
+ type = cast<PointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::BlockPointer:
+ type = cast<BlockPointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ type = cast<ReferenceType>(ty)->getPointeeType();
+ break;
+
+ case Type::MemberPointer:
+ type = cast<MemberPointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ // Losing element qualification here is fine.
+ type = cast<ArrayType>(ty)->getElementType();
+ break;
+
+ case Type::VariableArray: {
+ // Losing element qualification here is fine.
+ const VariableArrayType *vat = cast<VariableArrayType>(ty);
+
+ // Unknown size indication requires no size computation.
+ // Otherwise, evaluate and record it.
+ if (const Expr *size = vat->getSizeExpr()) {
+ // It's possible that we might have emitted this already,
+ // e.g. with a typedef and a pointer to it.
+ llvm::Value *&entry = VLASizeMap[size];
+ if (!entry) {
+ // Always zexting here would be wrong if it weren't
+ // undefined behavior to have a negative bound.
+ entry = Builder.CreateIntCast(EmitScalarExpr(size), SizeTy,
+ /*signed*/ false);
+ }
+ }
+ type = vat->getElementType();
+ break;
+ }
- const PointerType *PT = Ty->getAs<PointerType>();
- assert(PT && "unknown VM type!");
- EmitVLASize(PT->getPointeeType());
- return 0;
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ type = cast<FunctionType>(ty)->getResultType();
+ break;
+ }
+ } while (type->isVariablyModifiedType());
}
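A standalone sketch of the VLASizeMap memoization the VariableArray case relies on; an integer id stands in for the size expression, which is the real map key.

    #include <cstdint>
    #include <iostream>
    #include <map>

    // A bound is evaluated and recorded at most once, even when the same
    // variably-modified type is walked again (e.g. through a typedef and a
    // pointer to it).
    std::map<int, uint64_t> vlaSizeMap;

    uint64_t emitSize(int sizeExprId, uint64_t evaluatedBound) {
      uint64_t &entry = vlaSizeMap[sizeExprId];
      if (!entry)
        entry = evaluatedBound;   // the real code zero-extends to size_t here
      return entry;
    }

    int main() {
      emitSize(1, 10);                        // first walk records the bound
      std::cout << emitSize(1, 99) << "\n";   // later walks reuse it: prints 10
      return 0;
    }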
llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index bb8fd8e..f27ed94 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -18,8 +18,10 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/CharUnits.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ValueHandle.h"
@@ -63,6 +65,7 @@ namespace clang {
class ObjCAtTryStmt;
class ObjCAtThrowStmt;
class ObjCAtSynchronizedStmt;
+ class ObjCAutoreleasePoolStmt;
namespace CodeGen {
class CodeGenTypes;
@@ -179,15 +182,39 @@ public:
/// Cleanup implementations should generally be declared in an
/// anonymous namespace.
class Cleanup {
+ // Anchor the construction vtable.
+ virtual void anchor();
public:
- // Anchor the construction vtable. We use the destructor because
- // gcc gives an obnoxious warning if there are virtual methods
- // with an accessible non-virtual destructor. Unfortunately,
- // declaring this destructor makes it non-trivial, but there
- // doesn't seem to be any other way around this warning.
- //
- // This destructor will never be called.
- virtual ~Cleanup();
+ /// Generation flags.
+ class Flags {
+ enum {
+ F_IsForEH = 0x1,
+ F_IsNormalCleanupKind = 0x2,
+ F_IsEHCleanupKind = 0x4
+ };
+ unsigned flags;
+
+ public:
+ Flags() : flags(0) {}
+
+ /// isForEH - true if the current emission is for an EH cleanup.
+ bool isForEHCleanup() const { return flags & F_IsForEH; }
+ bool isForNormalCleanup() const { return !isForEHCleanup(); }
+ void setIsForEHCleanup() { flags |= F_IsForEH; }
+
+ bool isNormalCleanupKind() const { return flags & F_IsNormalCleanupKind; }
+ void setIsNormalCleanupKind() { flags |= F_IsNormalCleanupKind; }
+
+ /// isEHCleanupKind - true if the cleanup was pushed as an EH
+ /// cleanup.
+ bool isEHCleanupKind() const { return flags & F_IsEHCleanupKind; }
+ void setIsEHCleanupKind() { flags |= F_IsEHCleanupKind; }
+ };
+
+ // Provide a virtual destructor to suppress a very common warning
+ // that unfortunately cannot be suppressed without this. Cleanups
+ // should not rely on this destructor ever being called.
+ virtual ~Cleanup() {}
/// Emit the cleanup. For normal cleanups, this is run in the
/// same EH context as when the cleanup was pushed, i.e. the
@@ -196,29 +223,7 @@ public:
///
// \param IsForEHCleanup true if this is for an EH cleanup, false
/// if for a normal cleanup.
- virtual void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) = 0;
- };
-
- /// UnconditionalCleanupN stores its N parameters and just passes
- /// them to the real cleanup function.
- template <class T, class A0>
- class UnconditionalCleanup1 : public Cleanup {
- A0 a0;
- public:
- UnconditionalCleanup1(A0 a0) : a0(a0) {}
- void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
- T::Emit(CGF, IsForEHCleanup, a0);
- }
- };
-
- template <class T, class A0, class A1>
- class UnconditionalCleanup2 : public Cleanup {
- A0 a0; A1 a1;
- public:
- UnconditionalCleanup2(A0 a0, A1 a1) : a0(a0), a1(a1) {}
- void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
- T::Emit(CGF, IsForEHCleanup, a0, a1);
- }
+ virtual void Emit(CodeGenFunction &CGF, Flags flags) = 0;
};
/// ConditionalCleanupN stores the saved form of its N parameters,
@@ -228,9 +233,9 @@ public:
typedef typename DominatingValue<A0>::saved_type A0_saved;
A0_saved a0_saved;
- void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
- T::Emit(CGF, IsForEHCleanup, a0);
+ T(a0).Emit(CGF, flags);
}
public:
@@ -245,10 +250,10 @@ public:
A0_saved a0_saved;
A1_saved a1_saved;
- void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
- T::Emit(CGF, IsForEHCleanup, a0, a1);
+ T(a0, a1).Emit(CGF, flags);
}
public:
@@ -256,6 +261,51 @@ public:
: a0_saved(a0), a1_saved(a1) {}
};
+ template <class T, class A0, class A1, class A2>
+ class ConditionalCleanup3 : public Cleanup {
+ typedef typename DominatingValue<A0>::saved_type A0_saved;
+ typedef typename DominatingValue<A1>::saved_type A1_saved;
+ typedef typename DominatingValue<A2>::saved_type A2_saved;
+ A0_saved a0_saved;
+ A1_saved a1_saved;
+ A2_saved a2_saved;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
+ A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
+ A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
+ T(a0, a1, a2).Emit(CGF, flags);
+ }
+
+ public:
+ ConditionalCleanup3(A0_saved a0, A1_saved a1, A2_saved a2)
+ : a0_saved(a0), a1_saved(a1), a2_saved(a2) {}
+ };
+
+ template <class T, class A0, class A1, class A2, class A3>
+ class ConditionalCleanup4 : public Cleanup {
+ typedef typename DominatingValue<A0>::saved_type A0_saved;
+ typedef typename DominatingValue<A1>::saved_type A1_saved;
+ typedef typename DominatingValue<A2>::saved_type A2_saved;
+ typedef typename DominatingValue<A3>::saved_type A3_saved;
+ A0_saved a0_saved;
+ A1_saved a1_saved;
+ A2_saved a2_saved;
+ A3_saved a3_saved;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
+ A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
+ A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
+ A3 a3 = DominatingValue<A3>::restore(CGF, a3_saved);
+ T(a0, a1, a2, a3).Emit(CGF, flags);
+ }
+
+ public:
+ ConditionalCleanup4(A0_saved a0, A1_saved a1, A2_saved a2, A3_saved a3)
+ : a0_saved(a0), a1_saved(a1), a2_saved(a2), a3_saved(a3) {}
+ };
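A standalone sketch of the cleanup protocol these wrappers assume: cleanups are now constructible from their arguments and expose Emit(flags), so a conditional wrapper can restore its saved values, rebuild the cleanup, and forward to it.

    #include <iostream>

    struct Flags { bool forEH; Flags() : forEH(false) {} };

    struct Cleanup {
      virtual ~Cleanup() {}
      virtual void Emit(Flags flags) = 0;
    };

    // A concrete cleanup carries its own arguments, the way DestroyTemporary
    // is rewritten in this patch.
    struct PrintOnExit : Cleanup {
      const char *what;
      explicit PrintOnExit(const char *w) : what(w) {}
      void Emit(Flags) { std::cout << "cleanup: " << what << "\n"; }
    };

    // The conditional wrapper stores only the saved argument and rebuilds the
    // real cleanup when it fires, mirroring T(a0).Emit(CGF, flags) above.
    struct ConditionalPrintOnExit : Cleanup {
      const char *saved;
      explicit ConditionalPrintOnExit(const char *s) : saved(s) {}
      void Emit(Flags flags) { PrintOnExit(saved).Emit(flags); }
    };

    int main() {
      ConditionalPrintOnExit cleanup("temporary destroyed");
      cleanup.Emit(Flags());
      return 0;
    }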
+
private:
// The implementation for this class is in CGException.h and
// CGException.cpp; the definition is here because it's used as a
@@ -568,6 +618,10 @@ public:
/// CurGD - The GlobalDecl for the current function being compiled.
GlobalDecl CurGD;
+ /// PrologueCleanupDepth - The cleanup depth enclosing all the
+ /// cleanups associated with the parameters.
+ EHScopeStack::stable_iterator PrologueCleanupDepth;
+
/// ReturnBlock - Unified return block.
JumpDest ReturnBlock;
@@ -584,6 +638,9 @@ public:
bool CatchUndefined;
+ /// In ARC, whether we should autorelease the return value.
+ bool AutoreleaseResult;
+
const CodeGen::CGBlockInfo *BlockInfo;
llvm::Value *BlockPointer;
@@ -626,16 +683,28 @@ public:
/// rethrows.
llvm::SmallVector<llvm::Value*, 8> ObjCEHValueStack;
- // A struct holding information about a finally block's IR
- // generation. For now, doesn't actually hold anything.
- struct FinallyInfo {
- };
+ /// A class controlling the emission of a finally block.
+ class FinallyInfo {
+ /// Where the catchall's edge through the cleanup should go.
+ JumpDest RethrowDest;
+
+ /// A function to call to enter the catch.
+ llvm::Constant *BeginCatchFn;
- FinallyInfo EnterFinallyBlock(const Stmt *Stmt,
- llvm::Constant *BeginCatchFn,
- llvm::Constant *EndCatchFn,
- llvm::Constant *RethrowFn);
- void ExitFinallyBlock(FinallyInfo &FinallyInfo);
+ /// An i1 variable indicating whether or not the @finally is
+ /// running for an exception.
+ llvm::AllocaInst *ForEHVar;
+
+ /// An i8* variable into which the exception pointer to rethrow
+ /// has been saved.
+ llvm::AllocaInst *SavedExnVar;
+
+ public:
+ void enter(CodeGenFunction &CGF, const Stmt *Finally,
+ llvm::Constant *beginCatchFn, llvm::Constant *endCatchFn,
+ llvm::Constant *rethrowFn);
+ void exit(CodeGenFunction &CGF);
+ };
/// pushFullExprCleanup - Push a cleanup to be run at the end of the
/// current full-expression. Safe against the possibility that
@@ -644,10 +713,8 @@ public:
void pushFullExprCleanup(CleanupKind kind, A0 a0) {
// If we're not in a conditional branch, or if none of the
// arguments requires saving, then use the unconditional cleanup.
- if (!isInConditionalBranch()) {
- typedef EHScopeStack::UnconditionalCleanup1<T, A0> CleanupType;
- return EHStack.pushCleanup<CleanupType>(kind, a0);
- }
+ if (!isInConditionalBranch())
+ return EHStack.pushCleanup<T>(kind, a0);
typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
@@ -663,10 +730,8 @@ public:
void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1) {
// If we're not in a conditional branch, or if none of the
// arguments requires saving, then use the unconditional cleanup.
- if (!isInConditionalBranch()) {
- typedef EHScopeStack::UnconditionalCleanup2<T, A0, A1> CleanupType;
- return EHStack.pushCleanup<CleanupType>(kind, a0, a1);
- }
+ if (!isInConditionalBranch())
+ return EHStack.pushCleanup<T>(kind, a0, a1);
typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
@@ -676,6 +741,48 @@ public:
initFullExprCleanup();
}
+ /// pushFullExprCleanup - Push a cleanup to be run at the end of the
+ /// current full-expression. Safe against the possibility that
+ /// we're currently inside a conditionally-evaluated expression.
+ template <class T, class A0, class A1, class A2>
+ void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2) {
+ // If we're not in a conditional branch, or if none of the
+ // arguments requires saving, then use the unconditional cleanup.
+ if (!isInConditionalBranch()) {
+ return EHStack.pushCleanup<T>(kind, a0, a1, a2);
+ }
+
+ typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
+ typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
+ typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
+
+ typedef EHScopeStack::ConditionalCleanup3<T, A0, A1, A2> CleanupType;
+ EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved, a2_saved);
+ initFullExprCleanup();
+ }
+
+ /// pushFullExprCleanup - Push a cleanup to be run at the end of the
+ /// current full-expression. Safe against the possibility that
+ /// we're currently inside a conditionally-evaluated expression.
+ template <class T, class A0, class A1, class A2, class A3>
+ void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2, A3 a3) {
+ // If we're not in a conditional branch, or if none of the
+ // arguments requires saving, then use the unconditional cleanup.
+ if (!isInConditionalBranch()) {
+ return EHStack.pushCleanup<T>(kind, a0, a1, a2, a3);
+ }
+
+ typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
+ typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
+ typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
+ typename DominatingValue<A3>::saved_type a3_saved = saveValueInCond(a3);
+
+ typedef EHScopeStack::ConditionalCleanup4<T, A0, A1, A2, A3> CleanupType;
+ EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved,
+ a2_saved, a3_saved);
+ initFullExprCleanup();
+ }
+
/// PushDestructorCleanup - Push a cleanup to call the
/// complete-object destructor of an object of the given type at the
/// given address. Does nothing if T is not a C++ class type with a
@@ -1048,6 +1155,9 @@ public:
void disableDebugInfo() { DisableDebugInfo = true; }
void enableDebugInfo() { DisableDebugInfo = false; }
+ bool shouldUseFusedARCCalls() {
+ return CGM.getCodeGenOpts().OptimizationLevel == 0;
+ }
const LangOptions &getLangOptions() const { return CGM.getLangOptions(); }
@@ -1075,6 +1185,57 @@ public:
llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
//===--------------------------------------------------------------------===//
+ // Cleanups
+ //===--------------------------------------------------------------------===//
+
+ typedef void Destroyer(CodeGenFunction &CGF, llvm::Value *addr, QualType ty);
+
+ void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
+ llvm::Value *arrayEndPointer,
+ QualType elementType,
+ Destroyer &destroyer);
+ void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
+ llvm::Value *arrayEnd,
+ QualType elementType,
+ Destroyer &destroyer);
+
+ void pushDestroy(QualType::DestructionKind dtorKind,
+ llvm::Value *addr, QualType type);
+ void pushDestroy(CleanupKind kind, llvm::Value *addr, QualType type,
+ Destroyer &destroyer, bool useEHCleanupForArray);
+ void emitDestroy(llvm::Value *addr, QualType type, Destroyer &destroyer,
+ bool useEHCleanupForArray);
+ llvm::Function *generateDestroyHelper(llvm::Constant *addr,
+ QualType type,
+ Destroyer &destroyer,
+ bool useEHCleanupForArray);
+ void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
+ QualType type, Destroyer &destroyer,
+ bool checkZeroLength, bool useEHCleanup);
+
+ Destroyer &getDestroyer(QualType::DestructionKind destructionKind);
+
+ /// Determines whether an EH cleanup is required to destroy a type
+ /// with the given destruction kind.
+ bool needsEHCleanup(QualType::DestructionKind kind) {
+ switch (kind) {
+ case QualType::DK_none:
+ return false;
+ case QualType::DK_cxx_destructor:
+ case QualType::DK_objc_weak_lifetime:
+ return getLangOptions().Exceptions;
+ case QualType::DK_objc_strong_lifetime:
+ return getLangOptions().Exceptions &&
+ CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
+ }
+ llvm_unreachable("bad destruction kind");
+ }
+
+ CleanupKind getCleanupKind(QualType::DestructionKind kind) {
+ return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
+ }
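A sketch of the decision table above with illustrative enum names (not the clang ones): __strong needs an EH cleanup only when both exceptions and -fobjc-arc-eh are enabled, C++ destructors and __weak need one whenever exceptions are on, and trivial destruction never does.

    #include <cassert>

    enum DestructionKind { DK_none, DK_cxx_destructor, DK_objc_strong, DK_objc_weak };

    bool needsEHCleanup(DestructionKind k, bool exceptions, bool arcExceptions) {
      switch (k) {
      case DK_none:           return false;
      case DK_cxx_destructor:
      case DK_objc_weak:      return exceptions;
      case DK_objc_strong:    return exceptions && arcExceptions;
      }
      return false;
    }

    int main() {
      assert(!needsEHCleanup(DK_objc_strong, /*exceptions=*/true, /*arcExceptions=*/false));
      assert( needsEHCleanup(DK_objc_strong, true, true));
      assert( needsEHCleanup(DK_cxx_destructor, true, false));
      assert(!needsEHCleanup(DK_none, true, true));
      return 0;
    }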
+
+ //===--------------------------------------------------------------------===//
// Objective-C
//===--------------------------------------------------------------------===//
@@ -1236,9 +1397,9 @@ public:
/// a terminate scope encloses a try.
llvm::BasicBlock *getTerminateHandler();
- const llvm::Type *ConvertTypeForMem(QualType T);
- const llvm::Type *ConvertType(QualType T);
- const llvm::Type *ConvertType(const TypeDecl *T) {
+ llvm::Type *ConvertTypeForMem(QualType T);
+ llvm::Type *ConvertType(QualType T);
+ llvm::Type *ConvertType(const TypeDecl *T) {
return ConvertType(getContext().getTypeDeclType(T));
}
@@ -1345,7 +1506,8 @@ public:
/// CreateAggTemp - Create a temporary memory object for the given
/// aggregate type.
AggValueSlot CreateAggTemp(QualType T, const llvm::Twine &Name = "tmp") {
- return AggValueSlot::forAddr(CreateMemTemp(T, Name), false, false);
+ return AggValueSlot::forAddr(CreateMemTemp(T, Name), T.getQualifiers(),
+ false);
}
/// Emit a cast to void* in the appropriate address space.
@@ -1379,14 +1541,12 @@ public:
/// EmitAnyExprToMem - Emits the code necessary to evaluate an
/// arbitrary expression into the given memory location.
void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
- bool IsLocationVolatile,
- bool IsInitializer);
+ Qualifiers Quals, bool IsInitializer);
/// EmitExprAsInit - Emits the code necessary to initialize a
/// location in memory with the given initializer.
- void EmitExprAsInit(const Expr *init, const VarDecl *var,
- llvm::Value *loc, CharUnits alignment,
- bool capturedByInit);
+ void EmitExprAsInit(const Expr *init, const ValueDecl *D,
+ LValue lvalue, bool capturedByInit);
/// EmitAggregateCopy - Emit an aggregate copy.
///
@@ -1451,16 +1611,24 @@ public:
// instruction in LLVM instead once it works well enough.
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);
- /// EmitVLASize - Generate code for any VLA size expressions that might occur
- /// in a variably modified type. If Ty is a VLA, will return the value that
- /// corresponds to the size in bytes of the VLA type. Will return 0 otherwise.
+ /// emitArrayLength - Compute the length of an array, even if it's a
+ /// VLA, and drill down to the base element type.
+ llvm::Value *emitArrayLength(const ArrayType *arrayType,
+ QualType &baseType,
+ llvm::Value *&addr);
+
+ /// EmitVariablyModifiedType - Capture all the sizes for the VLA expressions in
+ /// the given variably-modified type and store them in the VLASizeMap.
///
/// This function can be called with a null (unreachable) insert point.
- llvm::Value *EmitVLASize(QualType Ty);
+ void EmitVariablyModifiedType(QualType Ty);
- // GetVLASize - Returns an LLVM value that corresponds to the size in bytes
- // of a variable length array type.
- llvm::Value *GetVLASize(const VariableArrayType *);
+ /// getVLASize - Returns the runtime element count of a variable length
+ /// array type, measured in units of its largest non-variably-sized element
+ /// type, paired with that element type. Assumes that the type has already
+ /// been emitted with EmitVariablyModifiedType.
+ std::pair<llvm::Value*,QualType> getVLASize(const VariableArrayType *vla);
+ std::pair<llvm::Value*,QualType> getVLASize(QualType vla);
/// LoadCXXThis - Load the value of 'this'. This function is only valid while
/// generating code for an C++ member function.
@@ -1535,17 +1703,7 @@ public:
CallExpr::const_arg_iterator ArgEnd,
bool ZeroInitialization = false);
- void EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
- const ArrayType *Array,
- llvm::Value *This);
-
- void EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
- llvm::Value *NumElements,
- llvm::Value *This);
-
- llvm::Function *GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
- const ArrayType *Array,
- llvm::Value *This);
+ static Destroyer destroyCXXObject;
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
bool ForVirtualBase, llvm::Value *This);
@@ -1584,6 +1742,10 @@ public:
/// This function can be called with a null (unreachable) insert point.
void EmitVarDecl(const VarDecl &D);
+ void EmitScalarInit(const Expr *init, const ValueDecl *D,
+ LValue lvalue, bool capturedByInit);
+ void EmitScalarInit(llvm::Value *init, LValue lvalue);
+
typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
llvm::Value *Address);
@@ -1639,6 +1801,8 @@ public:
AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
void EmitAutoVarInit(const AutoVarEmission &emission);
void EmitAutoVarCleanups(const AutoVarEmission &emission);
+ void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
+ QualType::DestructionKind dtorKind);
void EmitStaticVarDecl(const VarDecl &D,
llvm::GlobalValue::LinkageTypes Linkage);
@@ -1709,6 +1873,7 @@ public:
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
+ void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
llvm::Constant *getUnwindResumeFn();
llvm::Constant *getUnwindResumeOrRethrowFn();
@@ -1775,6 +1940,12 @@ public:
unsigned Alignment, QualType Ty,
llvm::MDNode *TBAAInfo = 0);
+ /// EmitLoadOfScalar - Load a scalar value from an address, taking
+ /// care to appropriately convert from the memory representation to
+ /// the LLVM value representation. The l-value must be a simple
+ /// l-value.
+ llvm::Value *EmitLoadOfScalar(LValue lvalue);
+
/// EmitStoreOfScalar - Store a scalar value to an address, taking
/// care to appropriately convert from the memory representation to
/// the LLVM value representation.
@@ -1782,21 +1953,26 @@ public:
bool Volatile, unsigned Alignment, QualType Ty,
llvm::MDNode *TBAAInfo = 0);
+ /// EmitStoreOfScalar - Store a scalar value to an address, taking
+ /// care to appropriately convert from the LLVM value representation to
+ /// the memory representation. The l-value must be a simple
+ /// l-value.
+ void EmitStoreOfScalar(llvm::Value *value, LValue lvalue);
+
/// EmitLoadOfLValue - Given an expression that represents a value lvalue,
/// this method emits the address of the lvalue, then loads the result as an
/// rvalue, returning the rvalue.
- RValue EmitLoadOfLValue(LValue V, QualType LVType);
- RValue EmitLoadOfExtVectorElementLValue(LValue V, QualType LVType);
- RValue EmitLoadOfBitfieldLValue(LValue LV, QualType ExprType);
+ RValue EmitLoadOfLValue(LValue V);
+ RValue EmitLoadOfExtVectorElementLValue(LValue V);
+ RValue EmitLoadOfBitfieldLValue(LValue LV);
RValue EmitLoadOfPropertyRefLValue(LValue LV,
ReturnValueSlot Return = ReturnValueSlot());
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
- void EmitStoreThroughLValue(RValue Src, LValue Dst, QualType Ty);
- void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst,
- QualType Ty);
+ void EmitStoreThroughLValue(RValue Src, LValue Dst);
+ void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
void EmitStoreThroughPropertyRefLValue(RValue Src, LValue Dst);
/// EmitStoreThroughLValue - Store Src into Dst with same constraints as
@@ -1805,7 +1981,7 @@ public:
/// \param Result [out] - If non-null, this will be set to a Value* for the
/// bit-field contents after the store, appropriate for use as the result of
/// an assignment to the bit-field.
- void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, QualType Ty,
+ void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
llvm::Value **Result=0);
/// Emit an l-value for an assignment (simple or compound) of complex type.
@@ -1832,6 +2008,7 @@ public:
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
LValue EmitCastLValue(const CastExpr *E);
LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E);
+ LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
@@ -1897,8 +2074,9 @@ public:
ReturnValueSlot ReturnValue = ReturnValueSlot());
llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
- llvm::Value * const *ArgBegin,
- llvm::Value * const *ArgEnd,
+ llvm::ArrayRef<llvm::Value *> Args,
+ const llvm::Twine &Name = "");
+ llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
const llvm::Twine &Name = "");
llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
@@ -1961,6 +2139,59 @@ public:
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
ReturnValueSlot Return = ReturnValueSlot());
+ /// Retrieves the default cleanup kind for an ARC cleanup.
+ /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
+ CleanupKind getARCCleanupKind() {
+ return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
+ ? NormalAndEHCleanup : NormalCleanup;
+ }
+
+ // ARC primitives.
+ void EmitARCInitWeak(llvm::Value *value, llvm::Value *addr);
+ void EmitARCDestroyWeak(llvm::Value *addr);
+ llvm::Value *EmitARCLoadWeak(llvm::Value *addr);
+ llvm::Value *EmitARCLoadWeakRetained(llvm::Value *addr);
+ llvm::Value *EmitARCStoreWeak(llvm::Value *value, llvm::Value *addr,
+ bool ignored);
+ void EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src);
+ void EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src);
+ llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
+ llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
+ llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
+ bool ignored);
+ llvm::Value *EmitARCStoreStrongCall(llvm::Value *addr, llvm::Value *value,
+ bool ignored);
+ llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
+ llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
+ llvm::Value *EmitARCRetainBlock(llvm::Value *value);
+ void EmitARCRelease(llvm::Value *value, bool precise);
+ llvm::Value *EmitARCAutorelease(llvm::Value *value);
+ llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
+ llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
+ llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
+
+ std::pair<LValue,llvm::Value*>
+ EmitARCStoreAutoreleasing(const BinaryOperator *e);
+ std::pair<LValue,llvm::Value*>
+ EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
+
+ llvm::Value *EmitObjCProduceObject(QualType T, llvm::Value *Ptr);
+ llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
+ llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
+
+ llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
+ llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
+
+ static Destroyer destroyARCStrongImprecise;
+ static Destroyer destroyARCStrongPrecise;
+ static Destroyer destroyARCWeak;
+
+ void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
+ llvm::Value *EmitObjCAutoreleasePoolPush();
+ llvm::Value *EmitObjCMRRAutoreleasePoolPush();
+ void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
+ void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
+
/// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
/// expression. Will emit a temporary variable if E is not an LValue.
RValue EmitReferenceBindingToExpr(const Expr* E,
@@ -2002,6 +2233,10 @@ public:
void EmitGCMemmoveCollectable(llvm::Value *DestPtr, llvm::Value *SrcPtr,
QualType Ty);
+ /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
+ /// make sure it survives garbage collection until this point.
+ void EmitExtendGCLifetime(llvm::Value *object);
+
/// EmitComplexExpr - Emit the computation of the specified expression of
/// complex type, returning the result.
ComplexPairTy EmitComplexExpr(const Expr *E,
@@ -2135,7 +2370,8 @@ private:
/// Ty, into individual arguments on the provided vector \arg Args. See
/// ABIArgInfo::Expand.
void ExpandTypeToArgs(QualType Ty, RValue Src,
- llvm::SmallVector<llvm::Value*, 16> &Args);
+ llvm::SmallVector<llvm::Value*, 16> &Args,
+ llvm::FunctionType *IRFuncTy);
llvm::Value* EmitAsmInput(const AsmStmt &S,
const TargetInfo::ConstraintInfo &Info,
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index 7a1a968..0668039 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -62,9 +62,9 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
: Context(C), Features(C.getLangOptions()), CodeGenOpts(CGO), TheModule(M),
TheTargetData(TD), TheTargetCodeGenInfo(0), Diags(diags),
ABI(createCXXABI(*this)),
- Types(C, M, TD, getTargetCodeGenInfo().getABIInfo(), ABI),
+ Types(C, M, TD, getTargetCodeGenInfo().getABIInfo(), ABI, CGO),
TBAA(0),
- VTables(*this), Runtime(0), DebugInfo(0),
+ VTables(*this), Runtime(0), DebugInfo(0), ARCData(0), RRData(0),
CFConstantStringClassRef(0), ConstantStringClassRef(0),
VMContext(M.getContext()),
NSConcreteGlobalBlockDecl(0), NSConcreteStackBlockDecl(0),
@@ -88,6 +88,10 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
Block.GlobalUniqueCount = 0;
+ if (C.getLangOptions().ObjCAutoRefCount)
+ ARCData = new ARCEntrypoints();
+ RRData = new RREntrypoints();
+
// Initialize the type cache.
llvm::LLVMContext &LLVMContext = M.getContext();
VoidTy = llvm::Type::getVoidTy(LLVMContext);
@@ -108,6 +112,8 @@ CodeGenModule::~CodeGenModule() {
delete &ABI;
delete TBAA;
delete DebugInfo;
+ delete ARCData;
+ delete RRData;
}
void CodeGenModule::createObjCRuntime() {
@@ -190,6 +196,10 @@ void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type,
getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
}
+llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) {
+ return llvm::ConstantInt::get(SizeTy, size.getQuantity());
+}
+
void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
const NamedDecl *D) const {
// Internal definitions always have default visibility.
@@ -347,8 +357,8 @@ void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy);
// Get the type of a ctor entry, { i32, void ()* }.
- llvm::StructType* CtorStructTy =
- llvm::StructType::get(VMContext, llvm::Type::getInt32Ty(VMContext),
+ llvm::StructType *CtorStructTy =
+ llvm::StructType::get(llvm::Type::getInt32Ty(VMContext),
llvm::PointerType::getUnqual(CtorFTy), NULL);
// Construct the constructor and destructor arrays.
@@ -670,7 +680,7 @@ llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
llvm::ConstantExpr::getBitCast(unitGV, SBP),
llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), LineNo)
};
- return llvm::ConstantStruct::get(VMContext, Fields, 4, false);
+ return llvm::ConstantStruct::getAnon(Fields);
}
bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
@@ -830,7 +840,8 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
llvm::Constant *
CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
const llvm::Type *Ty,
- GlobalDecl D, bool ForVTable) {
+ GlobalDecl D, bool ForVTable,
+ llvm::Attributes ExtraAttrs) {
// Lookup the entry, lazily creating it if necessary.
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry) {
@@ -846,8 +857,7 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
return Entry;
// Make sure the result is of the correct type.
- const llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
- return llvm::ConstantExpr::getBitCast(Entry, PTy);
+ return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo());
}
// This function doesn't have a complete type (for example, the return
@@ -869,6 +879,8 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
assert(F->getName() == MangledName && "name was uniqued!");
if (D.getDecl())
SetFunctionAttributes(D, F, IsIncompleteFunction);
+ if (ExtraAttrs != llvm::Attribute::None)
+ F->addFnAttr(ExtraAttrs);
// This is the first use or definition of a mangled name. If there is a
// deferred decl with this name, remember that we need to emit it at the end
@@ -915,7 +927,7 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
return F;
}
- const llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
+ llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
return llvm::ConstantExpr::getBitCast(F, PTy);
}
@@ -937,8 +949,10 @@ llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
/// type and name.
llvm::Constant *
CodeGenModule::CreateRuntimeFunction(const llvm::FunctionType *FTy,
- llvm::StringRef Name) {
- return GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false);
+ llvm::StringRef Name,
+ llvm::Attributes ExtraAttrs) {
+ return GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
+ ExtraAttrs);
}
static bool DeclIsConstantGlobal(ASTContext &Context, const VarDecl *D,
@@ -1340,7 +1354,8 @@ CodeGenModule::GetLLVMLinkageVarDefinition(const VarDecl *D,
((!CodeGenOpts.NoCommon && !D->getAttr<NoCommonAttr>()) ||
D->getAttr<CommonAttr>()) &&
!D->hasExternalStorage() && !D->getInit() &&
- !D->getAttr<SectionAttr>() && !D->isThreadSpecified()) {
+ !D->getAttr<SectionAttr>() && !D->isThreadSpecified() &&
+ !D->getAttr<WeakImportAttr>()) {
// Thread local vars aren't considered common linkage.
return llvm::GlobalVariable::CommonLinkage;
}
@@ -1398,8 +1413,7 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
// Okay, we can transform this. Create the new call instruction and copy
// over the required information.
ArgList.append(CS.arg_begin(), CS.arg_begin() + ArgNo);
- llvm::CallInst *NewCall = llvm::CallInst::Create(NewFn, ArgList.begin(),
- ArgList.end(), "", CI);
+ llvm::CallInst *NewCall = llvm::CallInst::Create(NewFn, ArgList, "", CI);
ArgList.clear();
if (!NewCall->getType()->isVoidTy())
NewCall->takeName(CI);
@@ -1426,7 +1440,7 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
bool variadic = false;
if (const FunctionProtoType *fpt = D->getType()->getAs<FunctionProtoType>())
variadic = fpt->isVariadic();
- const llvm::FunctionType *Ty = getTypes().GetFunctionType(FI, variadic, false);
+ const llvm::FunctionType *Ty = getTypes().GetFunctionType(FI, variadic);
// Get or create the prototype for the function.
llvm::Constant *Entry = GetAddrOfFunction(GD, Ty);
@@ -1597,10 +1611,10 @@ llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}
-llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,const llvm::Type **Tys,
- unsigned NumTys) {
- return llvm::Intrinsic::getDeclaration(&getModule(),
- (llvm::Intrinsic::ID)IID, Tys, NumTys);
+llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,
+ llvm::ArrayRef<llvm::Type*> Tys) {
+ return llvm::Intrinsic::getDeclaration(&getModule(), (llvm::Intrinsic::ID)IID,
+ Tys);
}
static llvm::StringMapEntry<llvm::Constant*> &
@@ -1997,6 +2011,7 @@ void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
false, true, false, ObjCMethodDecl::Required);
D->addInstanceMethod(DTORMethod);
CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false);
+ D->setHasCXXStructors(true);
}
// If the implementation doesn't have any ivar initializers, we don't need
@@ -2015,6 +2030,7 @@ void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
ObjCMethodDecl::Required);
D->addInstanceMethod(CTORMethod);
CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, CTORMethod, true);
+ D->setHasCXXStructors(true);
}
/// EmitNamespace - Emit all declarations in a namespace.
@@ -2276,7 +2292,7 @@ llvm::Constant *CodeGenModule::getBlockObjectDispose() {
}
// Otherwise construct the function by hand.
- const llvm::Type *args[] = { Int8PtrTy, Int32Ty };
+ llvm::Type *args[] = { Int8PtrTy, Int32Ty };
const llvm::FunctionType *fty
= llvm::FunctionType::get(VoidTy, args, false);
return BlockObjectDispose =
@@ -2295,7 +2311,7 @@ llvm::Constant *CodeGenModule::getBlockObjectAssign() {
}
// Otherwise construct the function by hand.
- const llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, Int32Ty };
+ llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, Int32Ty };
const llvm::FunctionType *fty
= llvm::FunctionType::get(VoidTy, args, false);
return BlockObjectAssign =
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index 779a352..86fb6d4 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -19,10 +19,10 @@
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Mangle.h"
#include "CGVTables.h"
#include "CodeGenTypes.h"
-#include "GlobalDecl.h"
#include "llvm/Module.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
@@ -33,6 +33,7 @@
namespace llvm {
class Module;
class Constant;
+ class ConstantInt;
class Function;
class GlobalValue;
class TargetData;
@@ -98,31 +99,31 @@ namespace CodeGen {
struct CodeGenTypeCache {
/// void
- const llvm::Type *VoidTy;
+ llvm::Type *VoidTy;
/// i8, i32, and i64
- const llvm::IntegerType *Int8Ty, *Int32Ty, *Int64Ty;
+ llvm::IntegerType *Int8Ty, *Int32Ty, *Int64Ty;
/// int
- const llvm::IntegerType *IntTy;
+ llvm::IntegerType *IntTy;
/// intptr_t, size_t, and ptrdiff_t, which we assume are the same size.
union {
- const llvm::IntegerType *IntPtrTy;
- const llvm::IntegerType *SizeTy;
- const llvm::IntegerType *PtrDiffTy;
+ llvm::IntegerType *IntPtrTy;
+ llvm::IntegerType *SizeTy;
+ llvm::IntegerType *PtrDiffTy;
};
/// void* in address space 0
union {
- const llvm::PointerType *VoidPtrTy;
- const llvm::PointerType *Int8PtrTy;
+ llvm::PointerType *VoidPtrTy;
+ llvm::PointerType *Int8PtrTy;
};
/// void** in address space 0
union {
- const llvm::PointerType *VoidPtrPtrTy;
- const llvm::PointerType *Int8PtrPtrTy;
+ llvm::PointerType *VoidPtrPtrTy;
+ llvm::PointerType *Int8PtrPtrTy;
};
/// The width of a pointer into the generic address space.
@@ -131,6 +132,71 @@ namespace CodeGen {
/// The alignment of a pointer into the generic address space.
unsigned char PointerAlignInBytes;
};
+
+struct RREntrypoints {
+ RREntrypoints() { memset(this, 0, sizeof(*this)); }
+ /// void objc_autoreleasePoolPop(void*);
+ llvm::Constant *objc_autoreleasePoolPop;
+
+ /// void *objc_autoreleasePoolPush(void);
+ llvm::Constant *objc_autoreleasePoolPush;
+};
+
+struct ARCEntrypoints {
+ ARCEntrypoints() { memset(this, 0, sizeof(*this)); }
+
+ /// id objc_autorelease(id);
+ llvm::Constant *objc_autorelease;
+
+ /// id objc_autoreleaseReturnValue(id);
+ llvm::Constant *objc_autoreleaseReturnValue;
+
+ /// void objc_copyWeak(id *dest, id *src);
+ llvm::Constant *objc_copyWeak;
+
+ /// void objc_destroyWeak(id*);
+ llvm::Constant *objc_destroyWeak;
+
+ /// id objc_initWeak(id*, id);
+ llvm::Constant *objc_initWeak;
+
+ /// id objc_loadWeak(id*);
+ llvm::Constant *objc_loadWeak;
+
+ /// id objc_loadWeakRetained(id*);
+ llvm::Constant *objc_loadWeakRetained;
+
+ /// void objc_moveWeak(id *dest, id *src);
+ llvm::Constant *objc_moveWeak;
+
+ /// id objc_retain(id);
+ llvm::Constant *objc_retain;
+
+ /// id objc_retainAutorelease(id);
+ llvm::Constant *objc_retainAutorelease;
+
+ /// id objc_retainAutoreleaseReturnValue(id);
+ llvm::Constant *objc_retainAutoreleaseReturnValue;
+
+ /// id objc_retainAutoreleasedReturnValue(id);
+ llvm::Constant *objc_retainAutoreleasedReturnValue;
+
+ /// id objc_retainBlock(id);
+ llvm::Constant *objc_retainBlock;
+
+ /// void objc_release(id);
+ llvm::Constant *objc_release;
+
+ /// id objc_storeStrong(id*, id);
+ llvm::Constant *objc_storeStrong;
+
+ /// id objc_storeWeak(id*, id);
+ llvm::Constant *objc_storeWeak;
+
+ /// A void(void) inline asm to use to mark that the return value of
+ /// a call will be immediately retain.
+ llvm::InlineAsm *retainAutoreleasedReturnValueMarker;
+};
/// CodeGenModule - This class organizes the cross-function state that is used
/// while generating LLVM code.
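The hunks here only declare the entrypoint caches; a rough sketch of how one slot might be filled lazily (the helper name is hypothetical, and lowering 'id' as i8* is an assumption of the example):

    static llvm::Constant *getARCRetainFn(CodeGen::CodeGenModule &CGM) {
      CodeGen::ARCEntrypoints &EPs = CGM.getARCEntrypoints();
      if (EPs.objc_retain) return EPs.objc_retain;     // already materialized
      llvm::Type *args[] = { CGM.Int8PtrTy };          // 'id' lowered as i8*
      const llvm::FunctionType *fty =
        llvm::FunctionType::get(CGM.Int8PtrTy, args, false);
      return EPs.objc_retain = CGM.CreateRuntimeFunction(fty, "objc_retain");
    }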
@@ -157,6 +223,8 @@ class CodeGenModule : public CodeGenTypeCache {
CGObjCRuntime* Runtime;
CGDebugInfo* DebugInfo;
+ ARCEntrypoints *ARCData;
+ RREntrypoints *RRData;
// WeakRefReferences - A set of references that have only been seen via
// a weakref so far. This is used to remove the weak of the reference if we ever
@@ -244,8 +312,8 @@ class CodeGenModule : public CodeGenTypeCache {
llvm::Constant *BlockObjectAssign;
llvm::Constant *BlockObjectDispose;
- const llvm::Type *BlockDescriptorType;
- const llvm::Type *GenericBlockLiteralType;
+ llvm::Type *BlockDescriptorType;
+ llvm::Type *GenericBlockLiteralType;
struct {
int GlobalUniqueCount;
@@ -275,6 +343,16 @@ public:
/// getCXXABI() - Return a reference to the configured C++ ABI.
CGCXXABI &getCXXABI() { return ABI; }
+ ARCEntrypoints &getARCEntrypoints() const {
+ assert(getLangOptions().ObjCAutoRefCount && ARCData != 0);
+ return *ARCData;
+ }
+
+ RREntrypoints &getRREntrypoints() const {
+ assert(RRData != 0);
+ return *RRData;
+ }
+
llvm::Value *getStaticLocalDeclAddress(const VarDecl *VD) {
return StaticLocalDeclMap[VD];
}
@@ -305,6 +383,9 @@ public:
static void DecorateInstruction(llvm::Instruction *Inst,
llvm::MDNode *TBAAInfo);
+ /// getSize - Emit the given number of characters as a value of type size_t.
+ llvm::ConstantInt *getSize(CharUnits numChars);
+
/// setGlobalVisibility - Set the visibility for the given LLVM
/// GlobalValue.
void setGlobalVisibility(llvm::GlobalValue *GV, const NamedDecl *D) const;
@@ -422,10 +503,10 @@ public:
/// getBlockDescriptorType - Fetches the type of a generic block
/// descriptor.
- const llvm::Type *getBlockDescriptorType();
+ llvm::Type *getBlockDescriptorType();
/// getGenericBlockLiteralType - The type of a generic block literal.
- const llvm::Type *getGenericBlockLiteralType();
+ llvm::Type *getGenericBlockLiteralType();
/// GetAddrOfGlobalBlock - Gets the address of a block which
/// requires no captures.
@@ -474,7 +555,7 @@ public:
/// created).
llvm::Constant *GetAddrOfConstantCString(const std::string &str,
const char *GlobalName=0);
-
+
/// GetAddrOfCXXConstructor - Return the address of the constructor of the
/// given type.
llvm::GlobalValue *GetAddrOfCXXConstructor(const CXXConstructorDecl *ctor,
@@ -492,8 +573,8 @@ public:
llvm::Value *getBuiltinLibFunction(const FunctionDecl *FD,
unsigned BuiltinID);
- llvm::Function *getIntrinsic(unsigned IID, const llvm::Type **Tys = 0,
- unsigned NumTys = 0);
+ llvm::Function *getIntrinsic(unsigned IID, llvm::ArrayRef<llvm::Type*> Tys =
+ llvm::ArrayRef<llvm::Type*>());
/// EmitTopLevelDecl - Emit code for a single top level declaration.
void EmitTopLevelDecl(Decl *D);
@@ -514,7 +595,9 @@ public:
/// CreateRuntimeFunction - Create a new runtime function with the specified
/// type and name.
llvm::Constant *CreateRuntimeFunction(const llvm::FunctionType *Ty,
- llvm::StringRef Name);
+ llvm::StringRef Name,
+ llvm::Attributes ExtraAttrs =
+ llvm::Attribute::None);
/// CreateRuntimeVariable - Create a new runtime global variable with the
/// specified type and name.
llvm::Constant *CreateRuntimeVariable(const llvm::Type *Ty,
@@ -642,7 +725,9 @@ private:
llvm::Constant *GetOrCreateLLVMFunction(llvm::StringRef MangledName,
const llvm::Type *Ty,
GlobalDecl D,
- bool ForVTable);
+ bool ForVTable,
+ llvm::Attributes ExtraAttrs =
+ llvm::Attribute::None);
llvm::Constant *GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
const llvm::PointerType *PTy,
const VarDecl *D,
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index 8db6fe5..764688f 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -28,9 +28,10 @@ using namespace CodeGen;
CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M,
const llvm::TargetData &TD, const ABIInfo &Info,
- CGCXXABI &CXXABI)
+ CGCXXABI &CXXABI, const CodeGenOptions &CGO)
: Context(Ctx), Target(Ctx.Target), TheModule(M), TheTargetData(TD),
- TheABIInfo(Info), TheCXXABI(CXXABI) {
+ TheABIInfo(Info), TheCXXABI(CXXABI), CodeGenOpts(CGO) {
+ SkippedLayout = false;
}
CodeGenTypes::~CodeGenTypes() {
@@ -44,28 +45,8 @@ CodeGenTypes::~CodeGenTypes() {
delete &*I++;
}
-/// HandleLateResolvedPointers - For top-level ConvertType calls, this handles
-/// pointers that are referenced but have not been converted yet. This is used
-/// to handle cyclic structures properly.
-void CodeGenTypes::HandleLateResolvedPointers() {
- assert(!PointersToResolve.empty() && "No pointers to resolve!");
-
- // Any pointers that were converted deferred evaluation of their pointee type,
- // creating an opaque type instead. This is in order to avoid problems with
- // circular types. Loop through all these defered pointees, if any, and
- // resolve them now.
- while (!PointersToResolve.empty()) {
- std::pair<QualType, llvm::OpaqueType*> P = PointersToResolve.pop_back_val();
-
- // We can handle bare pointers here because we know that the only pointers
- // to the Opaque type are P.second and from other types. Refining the
- // opqaue type away will invalidate P.second, but we don't mind :).
- const llvm::Type *NT = ConvertTypeForMemRecursive(P.first);
- P.second->refineAbstractTypeTo(NT);
- }
-}
-
-void CodeGenTypes::addRecordTypeName(const RecordDecl *RD, const llvm::Type *Ty,
+void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
+ llvm::StructType *Ty,
llvm::StringRef suffix) {
llvm::SmallString<256> TypeName;
llvm::raw_svector_ostream OS(TypeName);
@@ -93,47 +74,15 @@ void CodeGenTypes::addRecordTypeName(const RecordDecl *RD, const llvm::Type *Ty,
if (!suffix.empty())
OS << suffix;
- TheModule.addTypeName(OS.str(), Ty);
-}
-
-/// ConvertType - Convert the specified type to its LLVM form.
-const llvm::Type *CodeGenTypes::ConvertType(QualType T, bool IsRecursive) {
- const llvm::Type *Result = ConvertTypeRecursive(T);
-
- // If this is a top-level call to ConvertType and sub-conversions caused
- // pointers to get lazily built as opaque types, resolve the pointers, which
- // might cause Result to be merged away.
- if (!IsRecursive && !PointersToResolve.empty()) {
- llvm::PATypeHolder ResultHandle = Result;
- HandleLateResolvedPointers();
- Result = ResultHandle;
- }
- return Result;
-}
-
-const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) {
- T = Context.getCanonicalType(T);
-
- // See if type is already cached.
- llvm::DenseMap<const Type *, llvm::PATypeHolder>::iterator
- I = TypeCache.find(T.getTypePtr());
- // If type is found in map and this is not a definition for a opaque
- // place holder type then use it. Otherwise, convert type T.
- if (I != TypeCache.end())
- return I->second.get();
-
- const llvm::Type *ResultType = ConvertNewType(T);
- TypeCache.insert(std::make_pair(T.getTypePtr(),
- llvm::PATypeHolder(ResultType)));
- return ResultType;
+ Ty->setName(OS.str());
}
/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
-const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool IsRecursive){
- const llvm::Type *R = ConvertType(T, IsRecursive);
+llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T){
+ llvm::Type *R = ConvertType(T);
// If this is a non-bool type, don't map it.
if (!R->isIntegerTy(1))
@@ -142,69 +91,178 @@ const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool IsRecursive){
// Otherwise, return an integer of the target-specified size.
return llvm::IntegerType::get(getLLVMContext(),
(unsigned)Context.getTypeSize(T));
+}
+
+/// isRecordLayoutComplete - Return true if the specified type is already
+/// completely laid out.
+bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
+ llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I =
+ RecordDeclTypes.find(Ty);
+ return I != RecordDeclTypes.end() && !I->second->isOpaque();
}
-// Code to verify a given function type is complete, i.e. the return type
-// and all of the argument types are complete.
-const TagType *CodeGenTypes::VerifyFuncTypeComplete(const Type* T) {
- const FunctionType *FT = cast<FunctionType>(T);
- if (const TagType* TT = FT->getResultType()->getAs<TagType>())
- if (!TT->getDecl()->isDefinition())
- return TT;
- if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(T))
- for (unsigned i = 0; i < FPT->getNumArgs(); i++)
- if (const TagType* TT = FPT->getArgType(i)->getAs<TagType>())
- if (!TT->getDecl()->isDefinition())
- return TT;
- return 0;
+static bool
+isSafeToConvert(QualType T, CodeGenTypes &CGT,
+ llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked);
+
+
+/// isSafeToConvert - Return true if it is safe to convert the specified record
+/// decl to IR and lay it out, false if doing so would cause us to get into a
+/// recursive compilation mess.
+static bool
+isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT,
+ llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
+ // If we have already checked this type (maybe the same type is used by-value
+ // multiple times in multiple structure fields), don't check again.
+ if (!AlreadyChecked.insert(RD)) return true;
+
+ const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr();
+
+ // If this type is already laid out, converting it is a noop.
+ if (CGT.isRecordLayoutComplete(Key)) return true;
+
+ // If this type is currently being laid out, we can't recursively compile it.
+ if (CGT.isRecordBeingLaidOut(Key))
+ return false;
+
+ // If this type would require laying out bases that are currently being laid
+ // out, don't do it. This includes virtual base classes which get laid out
+ // when a class is translated, even though they aren't embedded by-value into
+ // the class.
+ if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator I = CRD->bases_begin(),
+ E = CRD->bases_end(); I != E; ++I)
+ if (!isSafeToConvert(I->getType()->getAs<RecordType>()->getDecl(),
+ CGT, AlreadyChecked))
+ return false;
+ }
+
+ // If this type would require laying out members that are currently being laid
+ // out, don't do it.
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I != E; ++I)
+ if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked))
+ return false;
+
+ // If there are no problems, let's do it.
+ return true;
+}
+
+/// isSafeToConvert - Return true if it is safe to convert this field type,
+/// which requires the structure elements contained by-value to all be
+/// recursively safe to convert.
+static bool
+isSafeToConvert(QualType T, CodeGenTypes &CGT,
+ llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
+ T = T.getCanonicalType();
+
+ // If this is a record, check it.
+ if (const RecordType *RT = dyn_cast<RecordType>(T))
+ return isSafeToConvert(RT->getDecl(), CGT, AlreadyChecked);
+
+ // If this is an array, check the elements, which are embedded inline.
+ if (const ArrayType *AT = dyn_cast<ArrayType>(T))
+ return isSafeToConvert(AT->getElementType(), CGT, AlreadyChecked);
+
+ // Otherwise, there is no concern about transforming this. We only care about
+ // things that are contained by-value in a structure that can have another
+ // structure as a member.
+ return true;
+}
+
+
+/// isSafeToConvert - Return true if it is safe to convert the specified record
+/// decl to IR and lay it out, false if doing so would cause us to get into a
+/// recursive compilation mess.
+static bool isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT) {
+ // If no structs are being laid out, we can certainly do this one.
+ if (CGT.noRecordsBeingLaidOut()) return true;
+
+ llvm::SmallPtrSet<const RecordDecl*, 16> AlreadyChecked;
+ return isSafeToConvert(RD, CGT, AlreadyChecked);
+}
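The recursion problem this guards against is easiest to see on a small input (illustrative only):

    // While 'A' is mid-layout, its by-value field forces conversion of 'B'.
    // 'B' refers to 'A' only through a pointer, so converting it is safe; a
    // by-value 'A' inside 'B' would instead be pushed onto DeferredRecords.
    struct A;
    struct B { struct A *owner; int x; };
    struct A { struct B b; };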
+
+
+/// isFuncTypeArgumentConvertible - Return true if the specified type in a
+/// function argument or result position can be converted to an IR type at this
+/// point. This boils down to being whether it is complete, as well as whether
+/// we've temporarily deferred expanding the type because we're in a recursive
+/// context.
+bool CodeGenTypes::isFuncTypeArgumentConvertible(QualType Ty) {
+ // If this isn't a tagged type, we can convert it!
+ const TagType *TT = Ty->getAs<TagType>();
+ if (TT == 0) return true;
+
+
+ // If it's a tagged type used by-value, but is just a forward decl, we can't
+ // convert it. Note that getDefinition()==0 is not the same as !isDefinition.
+ if (TT->getDecl()->getDefinition() == 0)
+ return false;
+
+ // If this is an enum, then it is always safe to convert.
+ const RecordType *RT = dyn_cast<RecordType>(TT);
+ if (RT == 0) return true;
+
+ // Otherwise, we have to be careful. If it is a struct that we're in the
+ // process of expanding, then we can't convert the function type. That's ok
+ // though because we must be in a pointer context under the struct, so we can
+ // just convert it to a dummy type.
+ //
+ // We decide this by checking whether ConvertRecordDeclType returns us an
+ // opaque type for a struct that we know is defined.
+ return isSafeToConvert(RT->getDecl(), *this);
+}
+
+
+/// Code to verify a given function type is complete, i.e. the return type
+/// and all of the argument types are complete. Also check whether any struct
+/// types involved have been deferred because we are in a recursive context;
+/// if so, we don't want to ask the ABI lowering code to handle a type that
+/// cannot be converted to an IR type.
+bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
+ if (!isFuncTypeArgumentConvertible(FT->getResultType()))
+ return false;
+
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
+ for (unsigned i = 0, e = FPT->getNumArgs(); i != e; i++)
+ if (!isFuncTypeArgumentConvertible(FPT->getArgType(i)))
+ return false;
+
+ return true;
}
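For example (illustrative only), a function type that mentions a forward-declared struct by value is not convertible yet, while one that only uses a pointer to it is:

    struct S;                          // forward declaration only
    typedef void Callback(struct S);   // not convertible: incomplete tag by value
    typedef void Handler(struct S *);  // convertible: only a pointer to S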
/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
- const Type *Key = Context.getTagDeclType(TD).getTypePtr();
- llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
- TagDeclTypes.find(Key);
- if (TDTI == TagDeclTypes.end()) return;
-
- // Remember the opaque LLVM type for this tagdecl.
- llvm::PATypeHolder OpaqueHolder = TDTI->second;
- assert(isa<llvm::OpaqueType>(OpaqueHolder.get()) &&
- "Updating compilation of an already non-opaque type?");
-
- // Remove it from TagDeclTypes so that it will be regenerated.
- TagDeclTypes.erase(TDTI);
-
- // Generate the new type.
- const llvm::Type *NT = ConvertTagDeclType(TD);
-
- // Refine the old opaque type to its new definition.
- cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NT);
-
- // Since we just completed a tag type, check to see if any function types
- // were completed along with the tag type.
- // FIXME: This is very inefficient; if we track which function types depend
- // on which tag types, though, it should be reasonably efficient.
- llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator i;
- for (i = FunctionTypes.begin(); i != FunctionTypes.end(); ++i) {
- if (const TagType* TT = VerifyFuncTypeComplete(i->first)) {
- // This function type still depends on an incomplete tag type; make sure
- // that tag type has an associated opaque type.
- ConvertTagDeclType(TT->getDecl());
- } else {
- // This function no longer depends on an incomplete tag type; create the
- // function type, and refine the opaque type to the new function type.
- llvm::PATypeHolder OpaqueHolder = i->second;
- const llvm::Type *NFT = ConvertNewType(QualType(i->first, 0));
- cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NFT);
- FunctionTypes.erase(i);
+ // If this is an enum being completed, then we flush all non-struct types from
+ // the cache. This allows function types and other things that may be derived
+ // from the enum to be recomputed.
+ if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
+ // Only flush the cache if we've actually already converted this type.
+ if (TypeCache.count(ED->getTypeForDecl())) {
+ // Okay, we formed some types based on this. We speculated that the enum
+ // would be lowered to i32, so we only need to flush the cache if this
+ // didn't happen.
+ if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
+ TypeCache.clear();
}
+ return;
}
+
+ // If we completed a RecordDecl that we previously used and converted to an
+ // anonymous type, then go ahead and complete it now.
+ const RecordDecl *RD = cast<RecordDecl>(TD);
+ if (RD->isDependentType()) return;
+
+ // Only complete it if we converted it already. If we haven't converted it
+ // yet, we'll just do it lazily.
+ if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
+ ConvertRecordDeclType(RD);
}
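Sketch of the record path under the new scheme (illustrative IR names; a typical target with 32-bit int is assumed):

    struct Node;                  // first use below creates:  %struct.Node = type opaque
    struct Node *head;            // pointer built against the opaque struct
    struct Node { struct Node *next; int v; };
    // If the record was already converted, UpdateCompletedType calls
    // ConvertRecordDeclType, which fills in the body:
    //   %struct.Node = type { %struct.Node*, i32 }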
-static const llvm::Type* getTypeForFormat(llvm::LLVMContext &VMContext,
- const llvm::fltSemantics &format) {
+static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
+ const llvm::fltSemantics &format) {
if (&format == &llvm::APFloat::IEEEsingle)
return llvm::Type::getFloatTy(VMContext);
if (&format == &llvm::APFloat::IEEEdouble)
@@ -219,10 +277,26 @@ static const llvm::Type* getTypeForFormat(llvm::LLVMContext &VMContext,
return 0;
}
-const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
- const clang::Type &Ty = *Context.getCanonicalType(T).getTypePtr();
+/// ConvertType - Convert the specified type to its LLVM form.
+llvm::Type *CodeGenTypes::ConvertType(QualType T) {
+ T = Context.getCanonicalType(T);
+
+ const Type *Ty = T.getTypePtr();
- switch (Ty.getTypeClass()) {
+ // RecordTypes are cached and processed specially.
+ if (const RecordType *RT = dyn_cast<RecordType>(Ty))
+ return ConvertRecordDeclType(RT->getDecl());
+
+ // See if type is already cached.
+ llvm::DenseMap<const Type *, llvm::Type *>::iterator TCI = TypeCache.find(Ty);
+ // If type is found in map then use it. Otherwise, convert type T.
+ if (TCI != TypeCache.end())
+ return TCI->second;
+
+ // If we don't have it in the cache, convert it now.
+ llvm::Type *ResultType = 0;
+ switch (Ty->getTypeClass()) {
+ case Type::Record: // Handled above.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
@@ -233,18 +307,20 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
break;
case Type::Builtin: {
- switch (cast<BuiltinType>(Ty).getKind()) {
+ switch (cast<BuiltinType>(Ty)->getKind()) {
case BuiltinType::Void:
case BuiltinType::ObjCId:
case BuiltinType::ObjCClass:
case BuiltinType::ObjCSel:
// LLVM void type can only be used as the result of a function call. Just
// map to the same as char.
- return llvm::Type::getInt8Ty(getLLVMContext());
+ ResultType = llvm::Type::getInt8Ty(getLLVMContext());
+ break;
case BuiltinType::Bool:
// Note that we always return bool as i1 for use as a scalar type.
- return llvm::Type::getInt1Ty(getLLVMContext());
+ ResultType = llvm::Type::getInt1Ty(getLLVMContext());
+ break;
case BuiltinType::Char_S:
case BuiltinType::Char_U:
@@ -262,24 +338,26 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
case BuiltinType::WChar_U:
case BuiltinType::Char16:
case BuiltinType::Char32:
- return llvm::IntegerType::get(getLLVMContext(),
- static_cast<unsigned>(Context.getTypeSize(T)));
+ ResultType = llvm::IntegerType::get(getLLVMContext(),
+ static_cast<unsigned>(Context.getTypeSize(T)));
+ break;
case BuiltinType::Float:
case BuiltinType::Double:
case BuiltinType::LongDouble:
- return getTypeForFormat(getLLVMContext(),
- Context.getFloatTypeSemantics(T));
+ ResultType = getTypeForFormat(getLLVMContext(),
+ Context.getFloatTypeSemantics(T));
+ break;
- case BuiltinType::NullPtr: {
+ case BuiltinType::NullPtr:
// Model std::nullptr_t as i8*
- const llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext());
- return llvm::PointerType::getUnqual(Ty);
- }
+ ResultType = llvm::Type::getInt8PtrTy(getLLVMContext());
+ break;
case BuiltinType::UInt128:
case BuiltinType::Int128:
- return llvm::IntegerType::get(getLLVMContext(), 128);
+ ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
+ break;
case BuiltinType::Overload:
case BuiltinType::Dependent:
@@ -288,107 +366,145 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
llvm_unreachable("Unexpected placeholder builtin type!");
break;
}
- llvm_unreachable("Unknown builtin type!");
break;
}
case Type::Complex: {
- const llvm::Type *EltTy =
- ConvertTypeRecursive(cast<ComplexType>(Ty).getElementType());
- return llvm::StructType::get(TheModule.getContext(), EltTy, EltTy, NULL);
+ llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
+ ResultType = llvm::StructType::get(EltTy, EltTy, NULL);
+ break;
}
case Type::LValueReference:
case Type::RValueReference: {
- const ReferenceType &RTy = cast<ReferenceType>(Ty);
- QualType ETy = RTy.getPointeeType();
- llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
- PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
+ const ReferenceType *RTy = cast<ReferenceType>(Ty);
+ QualType ETy = RTy->getPointeeType();
+ llvm::Type *PointeeType = ConvertTypeForMem(ETy);
unsigned AS = Context.getTargetAddressSpace(ETy);
- return llvm::PointerType::get(PointeeType, AS);
+ ResultType = llvm::PointerType::get(PointeeType, AS);
+ break;
}
case Type::Pointer: {
- const PointerType &PTy = cast<PointerType>(Ty);
- QualType ETy = PTy.getPointeeType();
- llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
- PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
+ const PointerType *PTy = cast<PointerType>(Ty);
+ QualType ETy = PTy->getPointeeType();
+ llvm::Type *PointeeType = ConvertTypeForMem(ETy);
+ if (PointeeType->isVoidTy())
+ PointeeType = llvm::Type::getInt8Ty(getLLVMContext());
unsigned AS = Context.getTargetAddressSpace(ETy);
- return llvm::PointerType::get(PointeeType, AS);
+ ResultType = llvm::PointerType::get(PointeeType, AS);
+ break;
}
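For instance (illustrative only):

    void *p;             // pointee 'void' is remapped to i8, so 'p' lowers to i8*
    struct Opaque;
    struct Opaque *q;    // pointee becomes an opaque named struct; 'q' lowers
                         // to %struct.Opaque*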
case Type::VariableArray: {
- const VariableArrayType &A = cast<VariableArrayType>(Ty);
- assert(A.getIndexTypeCVRQualifiers() == 0 &&
+ const VariableArrayType *A = cast<VariableArrayType>(Ty);
+ assert(A->getIndexTypeCVRQualifiers() == 0 &&
"FIXME: We only handle trivial array types so far!");
// VLAs resolve to the innermost element type; this matches
// the return of alloca, and there isn't any obviously better choice.
- return ConvertTypeForMemRecursive(A.getElementType());
+ ResultType = ConvertTypeForMem(A->getElementType());
+ break;
}
case Type::IncompleteArray: {
- const IncompleteArrayType &A = cast<IncompleteArrayType>(Ty);
- assert(A.getIndexTypeCVRQualifiers() == 0 &&
+ const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
+ assert(A->getIndexTypeCVRQualifiers() == 0 &&
"FIXME: We only handle trivial array types so far!");
- // int X[] -> [0 x int]
- return llvm::ArrayType::get(ConvertTypeForMemRecursive(A.getElementType()),
- 0);
+ // int X[] -> [0 x int], unless the element type is not sized. If it is
+ // unsized (e.g. an incomplete struct) just use [0 x i8].
+ ResultType = ConvertTypeForMem(A->getElementType());
+ if (!ResultType->isSized()) {
+ SkippedLayout = true;
+ ResultType = llvm::Type::getInt8Ty(getLLVMContext());
+ }
+ ResultType = llvm::ArrayType::get(ResultType, 0);
+ break;
}
case Type::ConstantArray: {
- const ConstantArrayType &A = cast<ConstantArrayType>(Ty);
- const llvm::Type *EltTy = ConvertTypeForMemRecursive(A.getElementType());
- return llvm::ArrayType::get(EltTy, A.getSize().getZExtValue());
+ const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
+ const llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());
+ ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue());
+ break;
}
case Type::ExtVector:
case Type::Vector: {
- const VectorType &VT = cast<VectorType>(Ty);
- return llvm::VectorType::get(ConvertTypeRecursive(VT.getElementType()),
- VT.getNumElements());
+ const VectorType *VT = cast<VectorType>(Ty);
+ ResultType = llvm::VectorType::get(ConvertType(VT->getElementType()),
+ VT->getNumElements());
+ break;
}
case Type::FunctionNoProto:
case Type::FunctionProto: {
+ const FunctionType *FT = cast<FunctionType>(Ty);
// First, check whether we can build the full function type. If the
// function type depends on an incomplete type (e.g. a struct or enum), we
- // cannot lower the function type. Instead, turn it into an Opaque pointer
- // and have UpdateCompletedType revisit the function type when/if the opaque
- // argument type is defined.
- if (const TagType *TT = VerifyFuncTypeComplete(&Ty)) {
- // This function's type depends on an incomplete tag type; make sure
- // we have an opaque type corresponding to the tag type.
- ConvertTagDeclType(TT->getDecl());
- // Create an opaque type for this function type, save it, and return it.
- llvm::Type *ResultType = llvm::OpaqueType::get(getLLVMContext());
- FunctionTypes.insert(std::make_pair(&Ty, ResultType));
- return ResultType;
+ // cannot lower the function type.
+ if (!isFuncTypeConvertible(FT)) {
+ // This function's type depends on an incomplete tag type.
+ // Return a placeholder type.
+ ResultType = llvm::StructType::get(getLLVMContext());
+
+ SkippedLayout = true;
+ break;
+ }
+
+ // While we're converting the argument types for a function, we don't want
+ // to recursively convert any pointed-to structs. Converting directly-used
+ // structs is ok though.
+ if (!RecordsBeingLaidOut.insert(Ty)) {
+ ResultType = llvm::StructType::get(getLLVMContext());
+
+ SkippedLayout = true;
+ break;
}
// The function type can be built; call the appropriate routines to
// build it.
const CGFunctionInfo *FI;
bool isVariadic;
- if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty)) {
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
FI = &getFunctionInfo(
- CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)),
- true /*Recursive*/);
+ CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
isVariadic = FPT->isVariadic();
} else {
- const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty);
+ const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
FI = &getFunctionInfo(
- CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)),
- true /*Recursive*/);
+ CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
isVariadic = true;
}
+
+ // If there is something higher level prodding our CGFunctionInfo, then
+ // don't recurse into it again.
+ if (FunctionsBeingProcessed.count(FI)) {
+
+ ResultType = llvm::StructType::get(getLLVMContext());
+ SkippedLayout = true;
+ } else {
- return GetFunctionType(*FI, isVariadic, true);
+ // Otherwise, we're good to go, go ahead and convert it.
+ ResultType = GetFunctionType(*FI, isVariadic);
+ }
+
+ RecordsBeingLaidOut.erase(Ty);
+
+ if (SkippedLayout)
+ TypeCache.clear();
+
+ if (RecordsBeingLaidOut.empty())
+ while (!DeferredRecords.empty())
+ ConvertRecordDeclType(DeferredRecords.pop_back_val());
+ break;
}
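A sketch of the interaction with struct layout (illustrative only):

    // Converting 'S' reaches the function type 'void (struct S *)' through the
    // 'cb' field.  The argument is only a pointer to S, so the function type
    // converts normally.  Had a by-value argument or return struct been
    // mid-layout, the function type would get the empty placeholder struct,
    // SkippedLayout would be set, and the TypeCache flushed afterwards.
    struct S { void (*cb)(struct S *); };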
case Type::ObjCObject:
- return ConvertTypeRecursive(cast<ObjCObjectType>(Ty).getBaseType());
+ ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
+ break;
case Type::ObjCInterface: {
// Objective-C interfaces are always opaque (outside of the
// runtime, which can do whatever it likes); we never refine
// these.
- const llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(&Ty)];
+ llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
if (!T)
- T = llvm::OpaqueType::get(getLLVMContext());
- return T;
+ T = llvm::StructType::createNamed(getLLVMContext(), "");
+ ResultType = T;
+ break;
}
case Type::ObjCObjectPointer: {
@@ -396,103 +512,105 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
// pointer to the underlying interface type. We don't need to worry about
// recursive conversion.
const llvm::Type *T =
- ConvertTypeRecursive(cast<ObjCObjectPointerType>(Ty).getPointeeType());
- return llvm::PointerType::getUnqual(T);
+ ConvertType(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
+ ResultType = T->getPointerTo();
+ break;
}
- case Type::Record:
case Type::Enum: {
- const TagDecl *TD = cast<TagType>(Ty).getDecl();
- const llvm::Type *Res = ConvertTagDeclType(TD);
-
- if (const RecordDecl *RD = dyn_cast<RecordDecl>(TD))
- addRecordTypeName(RD, Res, llvm::StringRef());
- return Res;
+ const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
+ if (ED->isDefinition() || ED->isFixed())
+ return ConvertType(ED->getIntegerType());
+ // Return a placeholder 'i32' type. This can be changed later when the
+ // type is defined (see UpdateCompletedType), but is likely to be the
+ // "right" answer.
+ ResultType = llvm::Type::getInt32Ty(getLLVMContext());
+ break;
}
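Concretely (illustrative; assumes a typical target where 'int' is 32 bits):

    enum Color { Red, Green };      // defined: lowers to its integer type, i32
    enum class Wide : long long;    // fixed underlying type: lowers to i64
    // An enum that is merely forward-used gets the speculative i32 and is
    // revisited by UpdateCompletedType if the real underlying type differs.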
case Type::BlockPointer: {
- const QualType FTy = cast<BlockPointerType>(Ty).getPointeeType();
- llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
- PointersToResolve.push_back(std::make_pair(FTy, PointeeType));
+ const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
+ llvm::Type *PointeeType = ConvertTypeForMem(FTy);
unsigned AS = Context.getTargetAddressSpace(FTy);
- return llvm::PointerType::get(PointeeType, AS);
+ ResultType = llvm::PointerType::get(PointeeType, AS);
+ break;
}
case Type::MemberPointer: {
- return getCXXABI().ConvertMemberPointerType(cast<MemberPointerType>(&Ty));
+ ResultType =
+ getCXXABI().ConvertMemberPointerType(cast<MemberPointerType>(Ty));
+ break;
}
}
-
- // FIXME: implement.
- return llvm::OpaqueType::get(getLLVMContext());
+
+ assert(ResultType && "Didn't convert a type?");
+
+ TypeCache[Ty] = ResultType;
+ return ResultType;
}
-/// ConvertTagDeclType - Lay out a tagged decl type like struct or union or
-/// enum.
-const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
+/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
+llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
// TagDecl's are not necessarily unique, instead use the (clang)
// type connected to the decl.
- const Type *Key =
- Context.getTagDeclType(TD).getTypePtr();
- llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
- TagDeclTypes.find(Key);
-
- // If we've already compiled this tag type, use the previous definition.
- if (TDTI != TagDeclTypes.end())
- return TDTI->second;
-
- const EnumDecl *ED = dyn_cast<EnumDecl>(TD);
-
- // If this is still a forward declaration, just define an opaque
- // type to use for this tagged decl.
- // C++0x: If this is a enumeration type with fixed underlying type,
- // consider it complete.
- if (!TD->isDefinition() && !(ED && ED->isFixed())) {
- llvm::Type *ResultType = llvm::OpaqueType::get(getLLVMContext());
- TagDeclTypes.insert(std::make_pair(Key, ResultType));
- return ResultType;
- }
-
- // Okay, this is a definition of a type. Compile the implementation now.
-
- if (ED) // Don't bother storing enums in TagDeclTypes.
- return ConvertTypeRecursive(ED->getIntegerType());
+ const Type *Key = Context.getTagDeclType(RD).getTypePtr();
- // This decl could well be recursive. In this case, insert an opaque
- // definition of this type, which the recursive uses will get. We will then
- // refine this opaque version later.
+ llvm::StructType *&Entry = RecordDeclTypes[Key];
- // Create new OpaqueType now for later use in case this is a recursive
- // type. This will later be refined to the actual type.
- llvm::PATypeHolder ResultHolder = llvm::OpaqueType::get(getLLVMContext());
- TagDeclTypes.insert(std::make_pair(Key, ResultHolder));
+ // If we don't have a StructType at all yet, create the forward declaration.
+ if (Entry == 0) {
+ Entry = llvm::StructType::createNamed(getLLVMContext(), "");
+ addRecordTypeName(RD, Entry, "");
+ }
+ llvm::StructType *Ty = Entry;
- const RecordDecl *RD = cast<const RecordDecl>(TD);
+ // If this is still a forward declaration, or the LLVM type is already
+ // complete, there's nothing more to do.
+ RD = RD->getDefinition();
+ if (RD == 0 || !Ty->isOpaque())
+ return Ty;
+
+ // If converting this type would cause us to infinitely loop, don't do it!
+ if (!isSafeToConvert(RD, *this)) {
+ DeferredRecords.push_back(RD);
+ return Ty;
+ }
+ // Okay, this is a definition of a type. Compile the implementation now.
+ bool InsertResult = RecordsBeingLaidOut.insert(Key); (void)InsertResult;
+ assert(InsertResult && "Recursively compiling a struct?");
+
// Force conversion of non-virtual base classes recursively.
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(TD)) {
- for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
- e = RD->bases_end(); i != e; ++i) {
- if (!i->isVirtual()) {
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
- ConvertTagDeclType(Base);
- }
+ if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator i = CRD->bases_begin(),
+ e = CRD->bases_end(); i != e; ++i) {
+ if (i->isVirtual()) continue;
+
+ ConvertRecordDeclType(i->getType()->getAs<RecordType>()->getDecl());
}
}
// Layout fields.
- CGRecordLayout *Layout = ComputeRecordLayout(RD);
-
+ CGRecordLayout *Layout = ComputeRecordLayout(RD, Ty);
CGRecordLayouts[Key] = Layout;
- const llvm::Type *ResultType = Layout->getLLVMType();
- // Refine our Opaque type to ResultType. This can invalidate ResultType, so
- // make sure to read the result out of the holder.
- cast<llvm::OpaqueType>(ResultHolder.get())
- ->refineAbstractTypeTo(ResultType);
+ // We're done laying out this struct.
+ bool EraseResult = RecordsBeingLaidOut.erase(Key); (void)EraseResult;
+ assert(EraseResult && "struct not in RecordsBeingLaidOut set?");
+
+ // If this struct blocked a FunctionType conversion, then recompute whatever
+ // was derived from that.
+ // FIXME: This is hugely overconservative.
+ if (SkippedLayout)
+ TypeCache.clear();
+
+ // If we're done converting the outer-most record, then convert any deferred
+ // structs as well.
+ if (RecordsBeingLaidOut.empty())
+ while (!DeferredRecords.empty())
+ ConvertRecordDeclType(DeferredRecords.pop_back_val());
- return ResultHolder.get();
+ return Ty;
}
/// getCGRecordLayout - Return record layout info for the given record decl.
@@ -503,7 +621,7 @@ CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
const CGRecordLayout *Layout = CGRecordLayouts.lookup(Key);
if (!Layout) {
// Compute the type information.
- ConvertTagDeclType(RD);
+ ConvertRecordDeclType(RD);
// Now try again.
Layout = CGRecordLayouts.lookup(Key);
@@ -513,15 +631,6 @@ CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
return *Layout;
}
-void CodeGenTypes::addBaseSubobjectTypeName(const CXXRecordDecl *RD,
- const CGRecordLayout &layout) {
- llvm::StringRef suffix;
- if (layout.getBaseSubobjectLLVMType() != layout.getLLVMType())
- suffix = ".base";
-
- addRecordTypeName(RD, layout.getBaseSubobjectLLVMType(), suffix);
-}
-
bool CodeGenTypes::isZeroInitializable(QualType T) {
// No need to check for member pointers when not compiling C++.
if (!Context.getLangOptions().CPlusPlus)
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index ff1eb4c..7c0fb81 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -15,7 +15,7 @@
#define CLANG_CODEGEN_CODEGENTYPES_H
#include "CGCall.h"
-#include "GlobalDecl.h"
+#include "clang/AST/GlobalDecl.h"
#include "llvm/Module.h"
#include "llvm/ADT/DenseMap.h"
#include <vector>
@@ -23,11 +23,10 @@
namespace llvm {
class FunctionType;
class Module;
- class OpaqueType;
- class PATypeHolder;
class TargetData;
class Type;
class LLVMContext;
+ class StructType;
}
namespace clang {
@@ -37,6 +36,7 @@ namespace clang {
class CXXConstructorDecl;
class CXXDestructorDecl;
class CXXMethodDecl;
+ class CodeGenOptions;
class FieldDecl;
class FunctionProtoType;
class ObjCInterfaceDecl;
@@ -58,91 +58,83 @@ namespace CodeGen {
class CodeGenTypes {
ASTContext &Context;
const TargetInfo &Target;
- llvm::Module& TheModule;
- const llvm::TargetData& TheTargetData;
- const ABIInfo& TheABIInfo;
+ llvm::Module &TheModule;
+ const llvm::TargetData &TheTargetData;
+ const ABIInfo &TheABIInfo;
CGCXXABI &TheCXXABI;
-
- llvm::SmallVector<std::pair<QualType,
- llvm::OpaqueType *>, 8> PointersToResolve;
-
- llvm::DenseMap<const Type*, llvm::PATypeHolder> TagDeclTypes;
-
- llvm::DenseMap<const Type*, llvm::PATypeHolder> FunctionTypes;
+ const CodeGenOptions &CodeGenOpts;
/// The opaque type map for Objective-C interfaces. All direct
/// manipulation is done by the runtime interfaces, which are
/// responsible for coercing to the appropriate type; these opaque
/// types are never refined.
- llvm::DenseMap<const ObjCInterfaceType*, const llvm::Type *> InterfaceTypes;
+ llvm::DenseMap<const ObjCInterfaceType*, llvm::Type *> InterfaceTypes;
/// CGRecordLayouts - This maps llvm struct type with corresponding
/// record layout info.
llvm::DenseMap<const Type*, CGRecordLayout *> CGRecordLayouts;
+ /// RecordDeclTypes - This contains the LLVM IR type for any converted
+ /// RecordDecl.
+ llvm::DenseMap<const Type*, llvm::StructType *> RecordDeclTypes;
+
/// FunctionInfos - Hold memoized CGFunctionInfo results.
llvm::FoldingSet<CGFunctionInfo> FunctionInfos;
+ /// RecordsBeingLaidOut - This set keeps track of records that we're currently
+ /// converting to an IR type. For example, when converting
+ /// 'struct A { struct B { int x; }; };', while processing 'x' the 'A' and 'B'
+ /// types will be in this set.
+ llvm::SmallPtrSet<const Type*, 4> RecordsBeingLaidOut;
+
+ llvm::SmallPtrSet<const CGFunctionInfo*, 4> FunctionsBeingProcessed;
+
+ /// SkippedLayout - True if we skipped laying out a function type because we
+ /// were inside a recursive struct conversion.
+ bool SkippedLayout;
+
+ llvm::SmallVector<const RecordDecl *, 8> DeferredRecords;
+
private:
- /// TypeCache - This map keeps cache of llvm::Types (through PATypeHolder)
- /// and maps llvm::Types to corresponding clang::Type. llvm::PATypeHolder is
- /// used instead of llvm::Type because it allows us to bypass potential
- /// dangling type pointers due to type refinement on llvm side.
- llvm::DenseMap<const Type *, llvm::PATypeHolder> TypeCache;
-
- /// ConvertNewType - Convert type T into a llvm::Type. Do not use this
- /// method directly because it does not do any type caching. This method
- /// is available only for ConvertType(). CovertType() is preferred
- /// interface to convert type T into a llvm::Type.
- const llvm::Type *ConvertNewType(QualType T);
-
- /// HandleLateResolvedPointers - For top-level ConvertType calls, this handles
- /// pointers that are referenced but have not been converted yet. This is
- /// used to handle cyclic structures properly.
- void HandleLateResolvedPointers();
-
- /// addRecordTypeName - Compute a name from the given record decl with an
- /// optional suffix and name the given LLVM type using it.
- void addRecordTypeName(const RecordDecl *RD, const llvm::Type *Ty,
- llvm::StringRef suffix);
+ /// TypeCache - This map keeps cache of llvm::Types
+ /// and maps llvm::Types to corresponding clang::Type.
+ llvm::DenseMap<const Type *, llvm::Type *> TypeCache;
public:
CodeGenTypes(ASTContext &Ctx, llvm::Module &M, const llvm::TargetData &TD,
- const ABIInfo &Info, CGCXXABI &CXXABI);
+ const ABIInfo &Info, CGCXXABI &CXXABI,
+ const CodeGenOptions &Opts);
~CodeGenTypes();
const llvm::TargetData &getTargetData() const { return TheTargetData; }
const TargetInfo &getTarget() const { return Target; }
ASTContext &getContext() const { return Context; }
const ABIInfo &getABIInfo() const { return TheABIInfo; }
+ const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; }
CGCXXABI &getCXXABI() const { return TheCXXABI; }
llvm::LLVMContext &getLLVMContext() { return TheModule.getContext(); }
/// ConvertType - Convert type T into a llvm::Type.
- const llvm::Type *ConvertType(QualType T, bool IsRecursive = false);
- const llvm::Type *ConvertTypeRecursive(QualType T);
+ llvm::Type *ConvertType(QualType T);
/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
- const llvm::Type *ConvertTypeForMem(QualType T, bool IsRecursive = false);
- const llvm::Type *ConvertTypeForMemRecursive(QualType T) {
- return ConvertTypeForMem(T, true);
- }
+ llvm::Type *ConvertTypeForMem(QualType T);
/// GetFunctionType - Get the LLVM function type for \arg Info.
- const llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info,
- bool IsVariadic,
- bool IsRecursive = false);
+ llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info,
+ bool IsVariadic);
- const llvm::FunctionType *GetFunctionType(GlobalDecl GD);
+ llvm::FunctionType *GetFunctionType(GlobalDecl GD);
- /// VerifyFuncTypeComplete - Utility to check whether a function type can
+ /// isFuncTypeConvertible - Utility to check whether a function type can
/// be converted to an LLVM type (i.e. doesn't depend on an incomplete tag
/// type).
- static const TagType *VerifyFuncTypeComplete(const Type* T);
-
+ bool isFuncTypeConvertible(const FunctionType *FT);
+ bool isFuncTypeArgumentConvertible(QualType Ty);
+
/// GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable,
/// given a CXXMethodDecl. If the method has an incomplete return type,
/// and/or incomplete argument types, this will return the opaque type.
@@ -150,11 +142,6 @@ public:
const CGRecordLayout &getCGRecordLayout(const RecordDecl*);
- /// addBaseSubobjectTypeName - Add a type name for the base subobject of the
- /// given record layout.
- void addBaseSubobjectTypeName(const CXXRecordDecl *RD,
- const CGRecordLayout &layout);
-
/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void UpdateCompletedType(const TagDecl *TD);
@@ -180,10 +167,8 @@ public:
Ty->getExtInfo());
}
- const CGFunctionInfo &getFunctionInfo(CanQual<FunctionProtoType> Ty,
- bool IsRecursive = false);
- const CGFunctionInfo &getFunctionInfo(CanQual<FunctionNoProtoType> Ty,
- bool IsRecursive = false);
+ const CGFunctionInfo &getFunctionInfo(CanQual<FunctionProtoType> Ty);
+ const CGFunctionInfo &getFunctionInfo(CanQual<FunctionNoProtoType> Ty);
/// getFunctionInfo - Get the function info for a member function of
/// the given type. This is used for calls through member function
@@ -206,23 +191,27 @@ public:
/// \param ArgTys - must all actually be canonical as params
const CGFunctionInfo &getFunctionInfo(CanQualType RetTy,
const llvm::SmallVectorImpl<CanQualType> &ArgTys,
- const FunctionType::ExtInfo &Info,
- bool IsRecursive = false);
+ const FunctionType::ExtInfo &Info);
/// \brief Compute a new LLVM record layout object for the given record.
- CGRecordLayout *ComputeRecordLayout(const RecordDecl *D);
+ CGRecordLayout *ComputeRecordLayout(const RecordDecl *D,
+ llvm::StructType *Ty);
+
+ /// addRecordTypeName - Compute a name from the given record decl with an
+ /// optional suffix and name the given LLVM type using it.
+ void addRecordTypeName(const RecordDecl *RD, llvm::StructType *Ty,
+ llvm::StringRef suffix);
+
public: // These are internal details of CGT that shouldn't be used externally.
- /// ConvertTagDeclType - Lay out a tagged decl type like struct or union or
- /// enum.
- const llvm::Type *ConvertTagDeclType(const TagDecl *TD);
+ /// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
+ llvm::StructType *ConvertRecordDeclType(const RecordDecl *TD);
/// GetExpandedTypes - Expand the type \arg Ty into the LLVM
/// argument types it would be passed as on the provided vector \arg
/// ArgTys. See ABIArgInfo::Expand.
void GetExpandedTypes(QualType type,
- llvm::SmallVectorImpl<const llvm::Type*> &expanded,
- bool isRecursive);
+ llvm::SmallVectorImpl<llvm::Type*> &expanded);
/// IsZeroInitializable - Return whether a type can be
/// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
@@ -231,6 +220,15 @@ public: // These are internal details of CGT that shouldn't be used externally.
/// IsZeroInitializable - Return whether a record type can be
/// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
bool isZeroInitializable(const CXXRecordDecl *RD);
+
+ bool isRecordLayoutComplete(const Type *Ty) const;
+ bool noRecordsBeingLaidOut() const {
+ return RecordsBeingLaidOut.empty();
+ }
+ bool isRecordBeingLaidOut(const Type *Ty) const {
+ return RecordsBeingLaidOut.count(Ty);
+ }
+
};
} // end namespace CodeGen
diff --git a/lib/CodeGen/GlobalDecl.h b/lib/CodeGen/GlobalDecl.h
deleted file mode 100644
index c2f36d2..0000000
--- a/lib/CodeGen/GlobalDecl.h
+++ /dev/null
@@ -1,127 +0,0 @@
-//===--- GlobalDecl.h - Global declaration holder ---------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// A GlobalDecl can hold either a regular variable/function or a C++ ctor/dtor
-// together with its type.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CLANG_CODEGEN_GLOBALDECL_H
-#define CLANG_CODEGEN_GLOBALDECL_H
-
-#include "clang/AST/DeclCXX.h"
-#include "clang/AST/DeclObjC.h"
-#include "clang/Basic/ABI.h"
-
-namespace clang {
-
-namespace CodeGen {
-
-/// GlobalDecl - represents a global declaration. This can either be a
-/// CXXConstructorDecl and the constructor type (Base, Complete).
-/// a CXXDestructorDecl and the destructor type (Base, Complete) or
-/// a VarDecl, a FunctionDecl or a BlockDecl.
-class GlobalDecl {
- llvm::PointerIntPair<const Decl*, 2> Value;
-
- void Init(const Decl *D) {
- assert(!isa<CXXConstructorDecl>(D) && "Use other ctor with ctor decls!");
- assert(!isa<CXXDestructorDecl>(D) && "Use other ctor with dtor decls!");
-
- Value.setPointer(D);
- }
-
-public:
- GlobalDecl() {}
-
- GlobalDecl(const VarDecl *D) { Init(D);}
- GlobalDecl(const FunctionDecl *D) { Init(D); }
- GlobalDecl(const BlockDecl *D) { Init(D); }
- GlobalDecl(const ObjCMethodDecl *D) { Init(D); }
-
- GlobalDecl(const CXXConstructorDecl *D, CXXCtorType Type)
- : Value(D, Type) {}
- GlobalDecl(const CXXDestructorDecl *D, CXXDtorType Type)
- : Value(D, Type) {}
-
- GlobalDecl getCanonicalDecl() const {
- GlobalDecl CanonGD;
- CanonGD.Value.setPointer(Value.getPointer()->getCanonicalDecl());
- CanonGD.Value.setInt(Value.getInt());
-
- return CanonGD;
- }
-
- const Decl *getDecl() const { return Value.getPointer(); }
-
- CXXCtorType getCtorType() const {
- assert(isa<CXXConstructorDecl>(getDecl()) && "Decl is not a ctor!");
- return static_cast<CXXCtorType>(Value.getInt());
- }
-
- CXXDtorType getDtorType() const {
- assert(isa<CXXDestructorDecl>(getDecl()) && "Decl is not a dtor!");
- return static_cast<CXXDtorType>(Value.getInt());
- }
-
- friend bool operator==(const GlobalDecl &LHS, const GlobalDecl &RHS) {
- return LHS.Value == RHS.Value;
- }
-
- void *getAsOpaquePtr() const { return Value.getOpaqueValue(); }
-
- static GlobalDecl getFromOpaquePtr(void *P) {
- GlobalDecl GD;
- GD.Value.setFromOpaqueValue(P);
- return GD;
- }
-
- GlobalDecl getWithDecl(const Decl *D) {
- GlobalDecl Result(*this);
- Result.Value.setPointer(D);
- return Result;
- }
-};
-
-} // end namespace CodeGen
-} // end namespace clang
-
-namespace llvm {
- template<class> struct DenseMapInfo;
-
- template<> struct DenseMapInfo<clang::CodeGen::GlobalDecl> {
- static inline clang::CodeGen::GlobalDecl getEmptyKey() {
- return clang::CodeGen::GlobalDecl();
- }
-
- static inline clang::CodeGen::GlobalDecl getTombstoneKey() {
- return clang::CodeGen::GlobalDecl::
- getFromOpaquePtr(reinterpret_cast<void*>(-1));
- }
-
- static unsigned getHashValue(clang::CodeGen::GlobalDecl GD) {
- return DenseMapInfo<void*>::getHashValue(GD.getAsOpaquePtr());
- }
-
- static bool isEqual(clang::CodeGen::GlobalDecl LHS,
- clang::CodeGen::GlobalDecl RHS) {
- return LHS == RHS;
- }
-
- };
-
- // GlobalDecl isn't *technically* a POD type. However, its copy constructor,
- // copy assignment operator, and destructor are all trivial.
- template <>
- struct isPodLike<clang::CodeGen::GlobalDecl> {
- static const bool value = true;
- };
-} // end namespace llvm
-
-#endif
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index 12ef9bd..0c86080f 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -24,6 +24,7 @@
#include "CodeGenModule.h"
#include <clang/AST/Mangle.h>
#include <clang/AST/Type.h>
+#include <llvm/Intrinsics.h>
#include <llvm/Target/TargetData.h>
#include <llvm/Value.h>
@@ -33,15 +34,15 @@ using namespace CodeGen;
namespace {
class ItaniumCXXABI : public CodeGen::CGCXXABI {
private:
- const llvm::IntegerType *PtrDiffTy;
+ llvm::IntegerType *PtrDiffTy;
protected:
bool IsARM;
// It's a little silly for us to cache this.
- const llvm::IntegerType *getPtrDiffTy() {
+ llvm::IntegerType *getPtrDiffTy() {
if (!PtrDiffTy) {
QualType T = getContext().getPointerDiffType();
- const llvm::Type *Ty = CGM.getTypes().ConvertTypeRecursive(T);
+ llvm::Type *Ty = CGM.getTypes().ConvertType(T);
PtrDiffTy = cast<llvm::IntegerType>(Ty);
}
return PtrDiffTy;
@@ -57,7 +58,7 @@ public:
bool isZeroInitializable(const MemberPointerType *MPT);
- const llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT);
+ llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT);
llvm::Value *EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
llvm::Value *&This,
@@ -175,13 +176,11 @@ CodeGen::CGCXXABI *CodeGen::CreateARMCXXABI(CodeGenModule &CGM) {
return new ARMCXXABI(CGM);
}
-const llvm::Type *
+llvm::Type *
ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
if (MPT->isMemberDataPointer())
return getPtrDiffTy();
- else
- return llvm::StructType::get(CGM.getLLVMContext(),
- getPtrDiffTy(), getPtrDiffTy(), NULL);
+ return llvm::StructType::get(getPtrDiffTy(), getPtrDiffTy(), NULL);
}
/// In the Itanium and ARM ABIs, method pointers have the form:
@@ -473,8 +472,7 @@ ItaniumCXXABI::EmitMemberPointerConversion(llvm::Constant *C,
else
Values[1] = llvm::ConstantExpr::getAdd(CS->getOperand(1), Offset);
- return llvm::ConstantStruct::get(CGM.getLLVMContext(), Values, 2,
- /*Packed=*/false);
+ return llvm::ConstantStruct::get(CS->getType(), Values);
}
@@ -489,8 +487,7 @@ ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
llvm::Constant *Zero = llvm::ConstantInt::get(ptrdiff_t, 0);
llvm::Constant *Values[2] = { Zero, Zero };
- return llvm::ConstantStruct::get(CGM.getLLVMContext(), Values, 2,
- /*Packed=*/false);
+ return llvm::ConstantStruct::getAnon(Values);
}
llvm::Constant *
@@ -540,7 +537,7 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
const llvm::Type *Ty;
// Check whether the function has a computable LLVM signature.
- if (!CodeGenTypes::VerifyFuncTypeComplete(FPT)) {
+ if (Types.isFuncTypeConvertible(FPT)) {
// The function has a computable LLVM signature; use the correct type.
Ty = Types.GetFunctionType(Types.getFunctionInfo(MD),
FPT->isVariadic());
@@ -555,8 +552,7 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, 0);
}
- return llvm::ConstantStruct::get(CGM.getLLVMContext(),
- MemPtr, 2, /*Packed=*/false);
+ return llvm::ConstantStruct::getAnon(MemPtr);
}
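For reference (illustrative, LP64 assumed), the anonymous constant produced for a null pointer-to-member-function looks like:

    struct S { void f(); };
    void (S::*pmf)() = 0;    // emitted as the literal struct { i64 0, i64 0 }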
/// The comparison algorithm is pretty easy: the member pointers are
@@ -802,10 +798,27 @@ bool ItaniumCXXABI::NeedsArrayCookie(const CXXNewExpr *expr) {
if (expr->doesUsualArrayDeleteWantSize())
return true;
+ // Automatic Reference Counting:
+ // We need an array cookie for pointers with strong or weak lifetime.
+ QualType AllocatedType = expr->getAllocatedType();
+ if (getContext().getLangOptions().ObjCAutoRefCount &&
+ AllocatedType->isObjCLifetimeType()) {
+ switch (AllocatedType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ return true;
+ }
+ }
+
// Otherwise, if the class has a non-trivial destructor, it always
// needs a cookie.
const CXXRecordDecl *record =
- expr->getAllocatedType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ AllocatedType->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
return (record && !record->hasTrivialDestructor());
}
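As a hypothetical Objective-C++ illustration (not taken from the patch), the new ARC check requests an array cookie even though no C++ destructor is involved:

    // Compiled as Objective-C++ with -fobjc-arc (sketch only).
    void makeObjects(unsigned n) {
      id *objs = new id[n];   // elements are __strong, so a cookie is needed
      delete [] objs;         // the element count is read back from the
    }                         // cookie so each element can be released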
@@ -816,6 +829,22 @@ bool ItaniumCXXABI::NeedsArrayCookie(const CXXDeleteExpr *expr,
if (expr->doesUsualArrayDeleteWantSize())
return true;
+ // Automatic Reference Counting:
+ // We need an array cookie for pointers with strong or weak lifetime.
+ if (getContext().getLangOptions().ObjCAutoRefCount &&
+ elementType->isObjCLifetimeType()) {
+ switch (elementType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ return true;
+ }
+ }
+
// Otherwise, if the class has a non-trivial destructor, it always
// needs a cookie.
const CXXRecordDecl *record =
@@ -1005,31 +1034,34 @@ void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
/*********************** Static local initialization **************************/
static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
- const llvm::PointerType *GuardPtrTy) {
+ llvm::PointerType *GuardPtrTy) {
// int __cxa_guard_acquire(__guard *guard_object);
+ llvm::Type *ArgTys[] = { GuardPtrTy };
const llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
- GuardPtrTy, /*isVarArg=*/false);
+ ArgTys, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire");
}
static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
- const llvm::PointerType *GuardPtrTy) {
+ llvm::PointerType *GuardPtrTy) {
// void __cxa_guard_release(__guard *guard_object);
+ llvm::Type *ArgTys[] = { GuardPtrTy };
const llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- GuardPtrTy, /*isVarArg=*/false);
+ ArgTys, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release");
}
static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
- const llvm::PointerType *GuardPtrTy) {
+ llvm::PointerType *GuardPtrTy) {
// void __cxa_guard_abort(__guard *guard_object);
+ llvm::Type *ArgTys[] = { GuardPtrTy };
const llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- GuardPtrTy, /*isVarArg=*/false);
+ ArgTys, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort");
}
@@ -1039,7 +1071,7 @@ namespace {
llvm::GlobalVariable *Guard;
CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
CGF.Builder.CreateCall(getGuardAbortFn(CGF.CGM, Guard->getType()), Guard)
->setDoesNotThrow();
}
@@ -1055,21 +1087,21 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// We only need to use thread-safe statics for local variables;
// global initialization is always single-threaded.
- bool ThreadsafeStatics = (getContext().getLangOptions().ThreadsafeStatics &&
- D.isLocalVarDecl());
+ bool threadsafe =
+ (getContext().getLangOptions().ThreadsafeStatics && D.isLocalVarDecl());
const llvm::IntegerType *GuardTy;
// If we have a global variable with internal linkage and thread-safe statics
// are disabled, we can just let the guard variable be of type i8.
- bool UseInt8GuardVariable = !ThreadsafeStatics && GV->hasInternalLinkage();
- if (UseInt8GuardVariable)
- GuardTy = Builder.getInt8Ty();
- else {
+ bool useInt8GuardVariable = !threadsafe && GV->hasInternalLinkage();
+ if (useInt8GuardVariable) {
+ GuardTy = CGF.Int8Ty;
+ } else {
// Guard variables are 64 bits in the generic ABI and 32 bits on ARM.
- GuardTy = (IsARM ? Builder.getInt32Ty() : Builder.getInt64Ty());
+ GuardTy = (IsARM ? CGF.Int32Ty : CGF.Int64Ty);
}
- const llvm::PointerType *GuardPtrTy = GuardTy->getPointerTo();
+ llvm::PointerType *GuardPtrTy = GuardTy->getPointerTo();
// Create the guard variable.
llvm::SmallString<256> GuardVName;
@@ -1099,7 +1131,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// if (__cxa_guard_acquire(&obj_guard))
// ...
// }
- if (IsARM && !UseInt8GuardVariable) {
+ if (IsARM && !useInt8GuardVariable) {
llvm::Value *V = Builder.CreateLoad(GuardVariable);
V = Builder.CreateAnd(V, Builder.getInt32(1));
IsInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
@@ -1130,13 +1162,16 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
+ llvm::BasicBlock *NoCheckBlock = EndBlock;
+ if (threadsafe) NoCheckBlock = CGF.createBasicBlock("init.barrier");
+
// Check if the first byte of the guard variable is zero.
- Builder.CreateCondBr(IsInitialized, InitCheckBlock, EndBlock);
+ Builder.CreateCondBr(IsInitialized, InitCheckBlock, NoCheckBlock);
CGF.EmitBlock(InitCheckBlock);
// Variables used when coping with thread-safe statics and exceptions.
- if (ThreadsafeStatics) {
+ if (threadsafe) {
// Call __cxa_guard_acquire.
llvm::Value *V
= Builder.CreateCall(getGuardAcquireFn(CGM, GuardPtrTy), GuardVariable);
@@ -1155,7 +1190,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// Emit the initializer and add a global destructor if appropriate.
CGF.EmitCXXGlobalVarDeclInit(D, GV);
- if (ThreadsafeStatics) {
+ if (threadsafe) {
// Pop the guard-abort cleanup if we pushed one.
CGF.PopCleanupBlock();
@@ -1165,5 +1200,23 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
Builder.CreateStore(llvm::ConstantInt::get(GuardTy, 1), GuardVariable);
}
+ // Emit an acquire memory barrier if using thread-safe statics:
+ // Itanium ABI:
+ // An implementation supporting thread-safety on multiprocessor
+ // systems must also guarantee that references to the initialized
+ // object do not occur before the load of the initialization flag.
+ if (threadsafe) {
+ Builder.CreateBr(EndBlock);
+ CGF.EmitBlock(NoCheckBlock);
+
+ llvm::Value *_false = Builder.getFalse();
+ llvm::Value *_true = Builder.getTrue();
+
+ Builder.CreateCall5(CGM.getIntrinsic(llvm::Intrinsic::memory_barrier),
+ /* load-load, load-store */ _true, _true,
+ /* store-load, store-store */ _false, _false,
+ /* device or I/O */ _false);
+ }
+
CGF.EmitBlock(EndBlock);
}
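For reference, a minimal C++ sketch of the construct this function lowers (not part of the patch):

    int compute();
    int cached() {
      // With thread-safe statics, the initializer is bracketed by
      // __cxa_guard_acquire/__cxa_guard_release, and the fast path now ends
      // with an acquire barrier before the object may be referenced.
      static int value = compute();
      return value;
    }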
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 043ead7..df2c1bd 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -75,7 +75,7 @@ void ABIArgInfo::dump() const {
break;
case Indirect:
OS << "Indirect Align=" << getIndirectAlign()
- << " Byal=" << getIndirectByVal()
+ << " ByVal=" << getIndirectByVal()
<< " Realign=" << getIndirectRealign();
break;
case Expand:
@@ -356,9 +356,9 @@ bool UseX86_MMXType(const llvm::Type *IRType) {
IRType->getScalarSizeInBits() != 64;
}
-static const llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
- llvm::StringRef Constraint,
- const llvm::Type* Ty) {
+static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ llvm::StringRef Constraint,
+ llvm::Type* Ty) {
if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy())
return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
return Ty;
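A hypothetical example of what this adjustment handles (assumed source, not from the patch): a 64-bit vector bound to the MMX "y" constraint has its IR type rewritten to x86_mmx.

    typedef int v2si __attribute__((vector_size(8)));
    void touch(v2si a) {
      // "y" requests an MMX register, so <2 x i32> is adjusted to x86_mmx.
      __asm__ volatile("" : : "y"(a));
    }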
@@ -374,6 +374,7 @@ class X86_32ABIInfo : public ABIInfo {
bool IsDarwinVectorABI;
bool IsSmallStructInRegABI;
+ bool IsMMXDisabled;
static bool isRegisterSize(unsigned Size) {
return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
@@ -403,14 +404,15 @@ public:
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
- X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p)
- : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p) {}
+ X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m)
+ : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
+ IsMMXDisabled(m) {}
};
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
- X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p)
- :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p)) {}
+ X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m)
+ :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m)) {}
void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const;
@@ -425,9 +427,9 @@ public:
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const;
- const llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
- llvm::StringRef Constraint,
- const llvm::Type* Ty) const {
+ llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ llvm::StringRef Constraint,
+ llvm::Type* Ty) const {
return X86AdjustInlineAsmType(CGF, Constraint, Ty);
}
@@ -562,7 +564,7 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy) const {
} else if (SeltTy->isPointerType()) {
// FIXME: It would be really nice if this could come out as the proper
// pointer type.
- const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(getVMContext());
+ llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(getVMContext());
return ABIArgInfo::getDirect(PtrTy);
} else if (SeltTy->isVectorType()) {
// 64- and 128-bit vectors are never returned in a
@@ -699,8 +701,11 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
Size));
}
- const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
+ llvm::Type *IRType = CGT.ConvertType(Ty);
if (UseX86_MMXType(IRType)) {
+ if (IsMMXDisabled)
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ 64));
ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
return AAI;
@@ -820,6 +825,22 @@ class X86_64ABIInfo : public ABIInfo {
/// should just return Memory for the aggregate).
static Class merge(Class Accum, Class Field);
+ /// postMerge - Implement the X86_64 ABI post merging algorithm.
+ ///
+ /// Post merger cleanup, reduces a malformed Hi and Lo pair to
+ /// final MEMORY or SSE classes when necessary.
+ ///
+ /// \param AggregateSize - The size of the current aggregate in
+ /// the classification process.
+ ///
+ /// \param Lo - The classification for the parts of the type
+ /// residing in the low word of the containing object.
+ ///
+ /// \param Hi - The classification for the parts of the type
+ /// residing in the higher words of the containing object.
+ ///
+ void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
+
/// classify - Determine the x86_64 register classes in which the
/// given type T should be passed.
///
@@ -843,13 +864,13 @@ class X86_64ABIInfo : public ABIInfo {
/// also be ComplexX87.
void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;
- const llvm::Type *Get16ByteVectorType(QualType Ty) const;
- const llvm::Type *GetSSETypeAtOffset(const llvm::Type *IRType,
- unsigned IROffset, QualType SourceTy,
- unsigned SourceOffset) const;
- const llvm::Type *GetINTEGERTypeAtOffset(const llvm::Type *IRType,
- unsigned IROffset, QualType SourceTy,
- unsigned SourceOffset) const;
+ llvm::Type *GetByteVectorType(QualType Ty) const;
+ llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
+ unsigned IROffset, QualType SourceTy,
+ unsigned SourceOffset) const;
+ llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
+ unsigned IROffset, QualType SourceTy,
+ unsigned SourceOffset) const;
/// getIndirectResult - Give a source type \arg Ty, return a suitable result
/// such that the argument will be returned in memory.
@@ -921,9 +942,9 @@ public:
return false;
}
- const llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
- llvm::StringRef Constraint,
- const llvm::Type* Ty) const {
+ llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ llvm::StringRef Constraint,
+ llvm::Type* Ty) const {
return X86AdjustInlineAsmType(CGF, Constraint, Ty);
}
@@ -956,6 +977,39 @@ public:
}
+void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
+ Class &Hi) const {
+ // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
+ //
+ // (a) If one of the classes is Memory, the whole argument is passed in
+ // memory.
+ //
+ // (b) If X87UP is not preceded by X87, the whole argument is passed in
+ // memory.
+ //
+ // (c) If the size of the aggregate exceeds two eightbytes and the first
+ // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
+ // argument is passed in memory. NOTE: This is necessary to keep the
+ // ABI working for processors that don't support the __m256 type.
+ //
+ // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
+ //
+ // Some of these are enforced by the merging logic. Others can arise
+ // only with unions; for example:
+ // union { _Complex double; unsigned; }
+ //
+ // Note that clauses (b) and (c) were added in 0.98.
+ //
+ if (Hi == Memory)
+ Lo = Memory;
+ if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
+ Lo = Memory;
+ if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
+ Lo = Memory;
+ if (Hi == SSEUp && Lo != SSE)
+ Hi = SSE;
+}
+
X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
// AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
// classified recursively so that always two fields are
@@ -1082,7 +1136,14 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
// split.
if (OffsetBase && OffsetBase != 64)
Hi = Lo;
- } else if (Size == 128) {
+ } else if (Size == 128 || Size == 256) {
+ // Arguments of 256-bits are split into four eightbyte chunks. The
+ // least significant one belongs to class SSE and all the others to class
+ // SSEUP. The original Lo and Hi design considers that types can't be
+ // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
+ // This design isn't correct for 256-bits, but since there're no cases
+ // where the upper parts would need to be inspected, avoid adding
+ // complexity and just consider Hi to match the 64-256 part.
Lo = SSE;
Hi = SSEUp;
}
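A hypothetical AVX example of the widened rule (assuming 256-bit vector types are in use):

    typedef double v4df __attribute__((vector_size(32)));   // 256 bits
    // Each operand classifies as SSE (low eightbyte) plus SSEUp for the
    // remaining eightbytes, so it travels in a single YMM register.
    v4df add(v4df a, v4df b) { return a + b; }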
@@ -1121,8 +1182,8 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
uint64_t Size = getContext().getTypeSize(Ty);
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
- // than two eightbytes, ..., it has class MEMORY.
- if (Size > 128)
+ // than four eightbytes, ..., it has class MEMORY.
+ if (Size > 256)
return;
// AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
@@ -1137,6 +1198,13 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
Current = NoClass;
uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
uint64_t ArraySize = AT->getSize().getZExtValue();
+
+ // The only case a 256-bit wide vector could be used is when the array
+ // contains a single 256-bit element. Since Lo and Hi logic isn't extended
+ // to work for sizes wider than 128, early check and fallback to memory.
+ if (Size > 128 && EltSize != 256)
+ return;
+
for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
Class FieldLo, FieldHi;
classify(AT->getElementType(), Offset, FieldLo, FieldHi);
@@ -1146,9 +1214,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
break;
}
- // Do post merger cleanup (see below). Only case we worry about is Memory.
- if (Hi == Memory)
- Lo = Memory;
+ postMerge(Size, Lo, Hi);
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
return;
}
@@ -1157,8 +1223,8 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
uint64_t Size = getContext().getTypeSize(Ty);
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
- // than two eightbytes, ..., it has class MEMORY.
- if (Size > 128)
+ // than four eightbytes, ..., it has class MEMORY.
+ if (Size > 256)
return;
// AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
@@ -1209,9 +1275,17 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
bool BitField = i->isBitField();
- // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
- // fields, it has class MEMORY.
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
+ // four eightbytes, or it contains unaligned fields, it has class MEMORY.
+ //
+ // The only case a 256-bit wide vector could be used is when the struct
+ // contains a single 256-bit element. Since Lo and Hi logic isn't extended
+ // to work for sizes wider than 128, early check and fallback to memory.
//
+ if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
+ Lo = Memory;
+ return;
+ }
// Note, skip this test for bit-fields, see below.
if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
Lo = Memory;
@@ -1257,31 +1331,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
break;
}
- // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
- //
- // (a) If one of the classes is MEMORY, the whole argument is
- // passed in memory.
- //
- // (b) If X87UP is not preceded by X87, the whole argument is
- // passed in memory.
- //
- // (c) If the size of the aggregate exceeds two eightbytes and the first
- // eight-byte isn’t SSE or any other eightbyte isn’t SSEUP, the whole
- // argument is passed in memory.
- //
- // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
- //
- // Some of these are enforced by the merging logic. Others can arise
- // only with unions; for example:
- // union { _Complex double; unsigned; }
- //
- // Note that clauses (b) and (c) were added in 0.98.
- if (Hi == Memory)
- Lo = Memory;
- if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
- Lo = Memory;
- if (Hi == SSEUp && Lo != SSE)
- Hi = SSE;
+ postMerge(Size, Lo, Hi);
}
}
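To illustrate the single-element carve-out for records (hypothetical types, not from the patch):

    typedef float v8sf __attribute__((vector_size(32)));
    struct OneVec  { v8sf v; };          // lone 256-bit field: SSE + SSEUp
    struct VecPlus { v8sf v; int tag; }; // wider than 128 bits with a field
                                         // that is not 256 bits: MEMORY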
@@ -1321,24 +1371,25 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const {
return ABIArgInfo::getIndirect(Align);
}
-/// Get16ByteVectorType - The ABI specifies that a value should be passed in an
-/// full vector XMM register. Pick an LLVM IR type that will be passed as a
+/// GetByteVectorType - The ABI specifies that a value should be passed in a
+/// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a
/// vector register.
-const llvm::Type *X86_64ABIInfo::Get16ByteVectorType(QualType Ty) const {
- const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
+llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
+ llvm::Type *IRType = CGT.ConvertType(Ty);
// Wrapper structs that just contain vectors are passed just like vectors,
// strip them off if present.
- const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
+ llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
while (STy && STy->getNumElements() == 1) {
IRType = STy->getElementType(0);
STy = dyn_cast<llvm::StructType>(IRType);
}
// If the preferred type is a 16-byte vector, prefer to pass it.
- if (const llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
- const llvm::Type *EltTy = VT->getElementType();
- if (VT->getBitWidth() == 128 &&
+ if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
+ llvm::Type *EltTy = VT->getElementType();
+ unsigned BitWidth = VT->getBitWidth();
+ if ((BitWidth == 128 || BitWidth == 256) &&
(EltTy->isFloatTy() || EltTy->isDoubleTy() ||
EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
@@ -1466,8 +1517,8 @@ static bool ContainsFloatAtOffset(const llvm::Type *IRType, unsigned IROffset,
/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
/// low 8 bytes of an XMM register, corresponding to the SSE class.
-const llvm::Type *X86_64ABIInfo::
-GetSSETypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
+llvm::Type *X86_64ABIInfo::
+GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
QualType SourceTy, unsigned SourceOffset) const {
// The only three choices we have are either double, <2 x float>, or float. We
// pass as float if the last 4 bytes is just padding. This happens for
@@ -1501,8 +1552,8 @@ GetSSETypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
/// SourceTy is the source level type for the entire argument. SourceOffset is
/// an offset into this that we're processing (which is always either 0 or 8).
///
-const llvm::Type *X86_64ABIInfo::
-GetINTEGERTypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
+llvm::Type *X86_64ABIInfo::
+GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
QualType SourceTy, unsigned SourceOffset) const {
// If we're dealing with an un-offset LLVM IR type, then it means that we're
// returning an 8-byte unit starting with it. See if we can safely use it.
@@ -1540,7 +1591,7 @@ GetINTEGERTypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
}
if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
- const llvm::Type *EltTy = ATy->getElementType();
+ llvm::Type *EltTy = ATy->getElementType();
unsigned EltSize = getTargetData().getTypeAllocSize(EltTy);
unsigned EltOffset = IROffset/EltSize*EltSize;
return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
@@ -1566,8 +1617,8 @@ GetINTEGERTypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
/// first class aggregate to represent them. For example, if the low part of
/// a by-value argument should be passed as i32* and the high part as float,
/// return {i32*, float}.
-static const llvm::Type *
-GetX86_64ByValArgumentPair(const llvm::Type *Lo, const llvm::Type *Hi,
+static llvm::Type *
+GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
const llvm::TargetData &TD) {
// In order to correctly satisfy the ABI, we need the high part to start
// at offset 8. If the high and low parts we inferred are both 4-byte types
@@ -1594,8 +1645,7 @@ GetX86_64ByValArgumentPair(const llvm::Type *Lo, const llvm::Type *Hi,
}
}
- const llvm::StructType *Result =
- llvm::StructType::get(Lo->getContext(), Lo, Hi, NULL);
+ llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);
// Verify that the second element is at an 8-byte offset.
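A worked example of the pairing (hypothetical source type, used only for illustration):

    struct S { int a; float b; double c; };
    // Low eightbyte: the int and float merge to INTEGER and are read as i64.
    // High eightbyte: the double classifies as SSE.
    // The pair built here is { i64, double }, placing the double at the
    // required 8-byte offset.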
@@ -1615,7 +1665,7 @@ classifyReturnType(QualType RetTy) const {
assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
- const llvm::Type *ResType = 0;
+ llvm::Type *ResType = 0;
switch (Lo) {
case NoClass:
if (Hi == NoClass)
@@ -1638,8 +1688,7 @@ classifyReturnType(QualType RetTy) const {
// AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
// available register of the sequence %rax, %rdx is used.
case Integer:
- ResType = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 0,
- RetTy, 0);
+ ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
// If we have a sign or zero extended integer, make sure to return Extend
// so that the parameter gets the right LLVM IR attributes.
@@ -1657,7 +1706,7 @@ classifyReturnType(QualType RetTy) const {
// AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
// available SSE register of the sequence %xmm0, %xmm1 is used.
case SSE:
- ResType = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 0, RetTy, 0);
+ ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
break;
// AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
@@ -1671,14 +1720,13 @@ classifyReturnType(QualType RetTy) const {
// %st1.
case ComplexX87:
assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
- ResType = llvm::StructType::get(getVMContext(),
- llvm::Type::getX86_FP80Ty(getVMContext()),
+ ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
llvm::Type::getX86_FP80Ty(getVMContext()),
NULL);
break;
}
- const llvm::Type *HighPart = 0;
+ llvm::Type *HighPart = 0;
switch (Hi) {
// Memory was handled previously and X87 should
// never occur as a hi class.
@@ -1691,24 +1739,24 @@ classifyReturnType(QualType RetTy) const {
break;
case Integer:
- HighPart = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy),
- 8, RetTy, 8);
+ HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
if (Lo == NoClass) // Return HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
break;
case SSE:
- HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 8, RetTy, 8);
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
if (Lo == NoClass) // Return HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
break;
// AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
- // is passed in the upper half of the last used SSE register.
+ // is passed in the next available eightbyte chunk of the last used
+ // vector register.
//
// SSEUP should always be preceded by SSE, just widen.
case SSEUp:
assert(Lo == SSE && "Unexpected SSEUp classification.");
- ResType = Get16ByteVectorType(RetTy);
+ ResType = GetByteVectorType(RetTy);
break;
// AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
@@ -1719,8 +1767,7 @@ classifyReturnType(QualType RetTy) const {
// preceded by X87. In such situations we follow gcc and pass the
// extra bits in an SSE reg.
if (Lo != X87) {
- HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy),
- 8, RetTy, 8);
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
if (Lo == NoClass) // Return HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
}
@@ -1748,7 +1795,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
neededInt = 0;
neededSSE = 0;
- const llvm::Type *ResType = 0;
+ llvm::Type *ResType = 0;
switch (Lo) {
case NoClass:
if (Hi == NoClass)
@@ -1767,6 +1814,8 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
// COMPLEX_X87, it is passed in memory.
case X87:
case ComplexX87:
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ ++neededInt;
return getIndirectResult(Ty);
case SSEUp:
@@ -1780,7 +1829,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
++neededInt;
// Pick an 8-byte type based on the preferred type.
- ResType = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 0, Ty, 0);
+ ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
// If we have a sign or zero extended integer, make sure to return Extend
// so that the parameter gets the right LLVM IR attributes.
@@ -1800,19 +1849,14 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
// available SSE register is used, the registers are taken in the
// order from %xmm0 to %xmm7.
case SSE: {
- const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
- if (Hi != NoClass || !UseX86_MMXType(IRType))
- ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
- else
- // This is an MMX type. Treat it as such.
- ResType = llvm::Type::getX86_MMXTy(getVMContext());
-
+ llvm::Type *IRType = CGT.ConvertType(Ty);
+ ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
++neededSSE;
break;
}
}
- const llvm::Type *HighPart = 0;
+ llvm::Type *HighPart = 0;
switch (Hi) {
// Memory was handled previously, ComplexX87 and X87 should
// never occur as hi classes, and X87Up must be preceded by X87,
@@ -1828,7 +1872,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
case Integer:
++neededInt;
// Pick an 8-byte type based on the preferred type.
- HighPart = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8);
+ HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
@@ -1838,7 +1882,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
// memory), except in situations involving unions.
case X87Up:
case SSE:
- HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8);
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
@@ -1851,7 +1895,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
// register. This only happens when 128-bit vectors are passed.
case SSEUp:
assert(Lo == SSE && "Unexpected SSEUp classification");
- ResType = Get16ByteVectorType(Ty);
+ ResType = GetByteVectorType(Ty);
break;
}
@@ -2059,10 +2103,10 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// area, we need to collect the two eightbytes together.
llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
- const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
+ llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
const llvm::Type *DblPtrTy =
llvm::PointerType::getUnqual(DoubleTy);
- const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
+ const llvm::StructType *ST = llvm::StructType::get(DoubleTy,
DoubleTy, NULL);
llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
@@ -2277,6 +2321,10 @@ public:
return 13;
}
+ llvm::StringRef getARCRetainAutoreleasedReturnValueMarker() const {
+ return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
+ }
+
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
CodeGen::CGBuilderTy &Builder = CGF.Builder;
@@ -2290,8 +2338,6 @@ public:
return false;
}
-
-
};
}
@@ -2371,9 +2417,8 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
}
- const llvm::Type *STy =
- llvm::StructType::get(getVMContext(),
- llvm::ArrayType::get(ElemTy, SizeRegs), NULL, NULL);
+ llvm::Type *STy =
+ llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL);
return ABIArgInfo::getDirect(STy);
}
@@ -3010,10 +3055,12 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::msp430:
return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
- case llvm::Triple::x86:
+ case llvm::Triple::x86: {
+ bool DisableMMX = strcmp(getContext().Target.getABI(), "no-mmx") == 0;
+
if (Triple.isOSDarwin())
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Types, true, true));
+ new X86_32TargetCodeGenInfo(Types, true, true, DisableMMX));
switch (Triple.getOS()) {
case llvm::Triple::Cygwin:
@@ -3024,12 +3071,13 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::OpenBSD:
case llvm::Triple::NetBSD:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Types, false, true));
+ new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX));
default:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Types, false, false));
+ new X86_32TargetCodeGenInfo(Types, false, false, DisableMMX));
}
+ }
case llvm::Triple::x86_64:
switch (Triple.getOS()) {
diff --git a/lib/CodeGen/TargetInfo.h b/lib/CodeGen/TargetInfo.h
index 4f59eb6..d5e8884 100644
--- a/lib/CodeGen/TargetInfo.h
+++ b/lib/CodeGen/TargetInfo.h
@@ -106,11 +106,25 @@ namespace clang {
return Address;
}
- virtual const llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
- llvm::StringRef Constraint,
- const llvm::Type* Ty) const {
+ virtual llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ llvm::StringRef Constraint,
+ llvm::Type* Ty) const {
return Ty;
}
+
+ /// Retrieve the address of a function to call immediately before
+ /// calling objc_retainAutoreleasedReturnValue. The
+ /// implementation of objc_autoreleaseReturnValue sniffs the
+ /// instruction stream following its return address to decide
+ /// whether it's a call to objc_retainAutoreleasedReturnValue.
+ /// This can be prohibitively expensive, depending on the
+ /// relocation model, and so on some targets it instead sniffs for
+ /// a particular instruction sequence. This function returns
+ /// that instruction sequence in inline assembly, which will be
+ /// empty if none is required.
+ virtual llvm::StringRef getARCRetainAutoreleasedReturnValueMarker() const {
+ return "";
+ }
};
}
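As a hypothetical caller-side sketch (Objective-C under ARC, not part of the patch), the marker sits between the call and the runtime function it helps recognize:

    id produce(void);
    void use(void) {
      id tmp = produce();   // the call is followed by the marker instruction,
      (void)tmp;            // then by objc_retainAutoreleasedReturnValue(tmp)
    }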