Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/ABIInfo.h                 |   38
-rw-r--r--  lib/CodeGen/BackendUtil.cpp           |  109
-rw-r--r--  lib/CodeGen/CGBlocks.cpp              |  138
-rw-r--r--  lib/CodeGen/CGBlocks.h                |   26
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp             |  430
-rw-r--r--  lib/CodeGen/CGCXXABI.cpp              |    2
-rw-r--r--  lib/CodeGen/CGCXXABI.h                |   12
-rw-r--r--  lib/CodeGen/CGCall.cpp                |  223
-rw-r--r--  lib/CodeGen/CGClass.cpp               |   31
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp           |  257
-rw-r--r--  lib/CodeGen/CGDebugInfo.h             |    3
-rw-r--r--  lib/CodeGen/CGDecl.cpp                |   53
-rw-r--r--  lib/CodeGen/CGDeclCXX.cpp             |   77
-rw-r--r--  lib/CodeGen/CGException.cpp           |   17
-rw-r--r--  lib/CodeGen/CGExpr.cpp                |  568
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp             |   32
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp             |   71
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp         |    5
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp        |   41
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp          |  482
-rw-r--r--  lib/CodeGen/CGObjC.cpp                |   86
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp             |  156
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp             |  798
-rw-r--r--  lib/CodeGen/CGObjCRuntime.cpp         |    7
-rw-r--r--  lib/CodeGen/CGObjCRuntime.h           |    8
-rw-r--r--  lib/CodeGen/CGRTTI.cpp                |   17
-rw-r--r--  lib/CodeGen/CGRecordLayout.h          |    4
-rw-r--r--  lib/CodeGen/CGRecordLayoutBuilder.cpp |   26
-rw-r--r--  lib/CodeGen/CGStmt.cpp                |  128
-rw-r--r--  lib/CodeGen/CGVTables.cpp             |   60
-rw-r--r--  lib/CodeGen/CodeGenAction.cpp         |    6
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp       |   66
-rw-r--r--  lib/CodeGen/CodeGenFunction.h         |  133
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp         |  227
-rw-r--r--  lib/CodeGen/CodeGenModule.h           |   36
-rw-r--r--  lib/CodeGen/CodeGenTBAA.cpp           |   57
-rw-r--r--  lib/CodeGen/CodeGenTBAA.h             |   15
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp          |    4
-rw-r--r--  lib/CodeGen/CodeGenTypes.h            |    6
-rw-r--r--  lib/CodeGen/ItaniumCXXABI.cpp         |   58
-rw-r--r--  lib/CodeGen/MicrosoftCXXABI.cpp       |   65
-rw-r--r--  lib/CodeGen/ModuleBuilder.cpp         |    6
-rw-r--r--  lib/CodeGen/TargetInfo.cpp            |  752
43 files changed, 3929 insertions(+), 1407 deletions(-)
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index 86f5380..da6d035 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -16,7 +16,7 @@
namespace llvm {
class Value;
class LLVMContext;
- class TargetData;
+ class DataLayout;
}
namespace clang {
@@ -70,46 +70,52 @@ namespace clang {
private:
Kind TheKind;
llvm::Type *TypeData;
- llvm::Type *PaddingType; // Currently allowed only for Direct.
+ llvm::Type *PaddingType;
unsigned UIntData;
bool BoolData0;
bool BoolData1;
bool InReg;
+ bool PaddingInReg;
ABIArgInfo(Kind K, llvm::Type *TD, unsigned UI, bool B0, bool B1, bool IR,
- llvm::Type* P)
+ bool PIR, llvm::Type* P)
: TheKind(K), TypeData(TD), PaddingType(P), UIntData(UI), BoolData0(B0),
- BoolData1(B1), InReg(IR) {}
+ BoolData1(B1), InReg(IR), PaddingInReg(PIR) {}
public:
ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {}
static ABIArgInfo getDirect(llvm::Type *T = 0, unsigned Offset = 0,
llvm::Type *Padding = 0) {
- return ABIArgInfo(Direct, T, Offset, false, false, false, Padding);
+ return ABIArgInfo(Direct, T, Offset, false, false, false, false, Padding);
}
- static ABIArgInfo getDirectInReg(llvm::Type *T) {
- return ABIArgInfo(Direct, T, 0, false, false, true, 0);
+ static ABIArgInfo getDirectInReg(llvm::Type *T = 0) {
+ return ABIArgInfo(Direct, T, 0, false, false, true, false, 0);
}
static ABIArgInfo getExtend(llvm::Type *T = 0) {
- return ABIArgInfo(Extend, T, 0, false, false, false, 0);
+ return ABIArgInfo(Extend, T, 0, false, false, false, false, 0);
}
static ABIArgInfo getExtendInReg(llvm::Type *T = 0) {
- return ABIArgInfo(Extend, T, 0, false, false, true, 0);
+ return ABIArgInfo(Extend, T, 0, false, false, true, false, 0);
}
static ABIArgInfo getIgnore() {
- return ABIArgInfo(Ignore, 0, 0, false, false, false, 0);
+ return ABIArgInfo(Ignore, 0, 0, false, false, false, false, 0);
}
static ABIArgInfo getIndirect(unsigned Alignment, bool ByVal = true
, bool Realign = false) {
- return ABIArgInfo(Indirect, 0, Alignment, ByVal, Realign, false, 0);
+ return ABIArgInfo(Indirect, 0, Alignment, ByVal, Realign, false, false, 0);
}
static ABIArgInfo getIndirectInReg(unsigned Alignment, bool ByVal = true
, bool Realign = false) {
- return ABIArgInfo(Indirect, 0, Alignment, ByVal, Realign, true, 0);
+ return ABIArgInfo(Indirect, 0, Alignment, ByVal, Realign, true, false, 0);
}
static ABIArgInfo getExpand() {
- return ABIArgInfo(Expand, 0, 0, false, false, false, 0);
+ return ABIArgInfo(Expand, 0, 0, false, false, false, false, 0);
+ }
+ static ABIArgInfo getExpandWithPadding(bool PaddingInReg,
+ llvm::Type *Padding) {
+ return ABIArgInfo(Expand, 0, 0, false, false, false, PaddingInReg,
+ Padding);
}
Kind getKind() const { return TheKind; }
@@ -133,6 +139,10 @@ namespace clang {
return PaddingType;
}
+ bool getPaddingInReg() const {
+ return PaddingInReg;
+ }
+
llvm::Type *getCoerceToType() const {
assert(canHaveCoerceToType() && "Invalid kind!");
return TypeData;
@@ -178,7 +188,7 @@ namespace clang {
ASTContext &getContext() const;
llvm::LLVMContext &getVMContext() const;
- const llvm::TargetData &getTargetData() const;
+ const llvm::DataLayout &getDataLayout() const;
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const = 0;
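The new PaddingInReg flag and getExpandWithPadding factory let a target expand an argument into its scalar pieces while still modelling a padding slot that is passed in a register. A minimal sketch of how a target's classifier could use it (the hook name and the i32 padding type are illustrative assumptions, not part of this patch):

  // Hypothetical target hook: expand Ty into its pieces, but reserve one
  // i32 register slot of padding in front of it.
  ABIArgInfo classifyArgumentType(QualType Ty, llvm::LLVMContext &Ctx) const {
    llvm::Type *PadTy = llvm::Type::getInt32Ty(Ctx); // assumed padding slot
    return ABIArgInfo::getExpandWithPadding(/*PaddingInReg=*/true, PadTy);
  }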
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index 0a1915b..62f87c9 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -27,7 +27,7 @@
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
@@ -54,36 +54,67 @@ class EmitAssemblyHelper {
mutable FunctionPassManager *PerFunctionPasses;
private:
- PassManager *getCodeGenPasses() const {
+ PassManager *getCodeGenPasses(TargetMachine *TM) const {
if (!CodeGenPasses) {
CodeGenPasses = new PassManager();
- CodeGenPasses->add(new TargetData(TheModule));
+ CodeGenPasses->add(new DataLayout(TheModule));
+ // Add TargetTransformInfo.
+ if (TM) {
+ TargetTransformInfo *TTI =
+ new TargetTransformInfo(TM->getScalarTargetTransformInfo(),
+ TM->getVectorTargetTransformInfo());
+ CodeGenPasses->add(TTI);
+ }
}
return CodeGenPasses;
}
- PassManager *getPerModulePasses() const {
+ PassManager *getPerModulePasses(TargetMachine *TM) const {
if (!PerModulePasses) {
PerModulePasses = new PassManager();
- PerModulePasses->add(new TargetData(TheModule));
+ PerModulePasses->add(new DataLayout(TheModule));
+ if (TM) {
+ TargetTransformInfo *TTI =
+ new TargetTransformInfo(TM->getScalarTargetTransformInfo(),
+ TM->getVectorTargetTransformInfo());
+ PerModulePasses->add(TTI);
+ }
}
return PerModulePasses;
}
- FunctionPassManager *getPerFunctionPasses() const {
+ FunctionPassManager *getPerFunctionPasses(TargetMachine *TM) const {
if (!PerFunctionPasses) {
PerFunctionPasses = new FunctionPassManager(TheModule);
- PerFunctionPasses->add(new TargetData(TheModule));
+ PerFunctionPasses->add(new DataLayout(TheModule));
+ if (TM) {
+ TargetTransformInfo *TTI =
+ new TargetTransformInfo(TM->getScalarTargetTransformInfo(),
+ TM->getVectorTargetTransformInfo());
+ PerFunctionPasses->add(TTI);
+ }
}
return PerFunctionPasses;
}
- void CreatePasses();
+
+ void CreatePasses(TargetMachine *TM);
+
+ /// CreateTargetMachine - Generates the TargetMachine.
+ /// Returns null if it is unable to create the target machine.
+ /// Some of our clang tests specify triples which are not built
+ /// into clang. This is okay because these tests check the generated
+ /// IR, and they require DataLayout which depends on the triple.
+ /// In this case, we allow this method to fail and not report an error.
+ /// When MustCreateTM is used, we print an error if we are unable to load
+ /// the requested target.
+ TargetMachine *CreateTargetMachine(bool MustCreateTM);
/// AddEmitPasses - Add passes necessary to emit assembly or LLVM IR.
///
/// \return True on success.
- bool AddEmitPasses(BackendAction Action, formatted_raw_ostream &OS);
+ bool AddEmitPasses(BackendAction Action, formatted_raw_ostream &OS,
+ TargetMachine *TM);
public:
EmitAssemblyHelper(DiagnosticsEngine &_Diags,
@@ -137,9 +168,9 @@ static void addThreadSanitizerPass(const PassManagerBuilder &Builder,
PM.add(createThreadSanitizerPass());
}
-void EmitAssemblyHelper::CreatePasses() {
+void EmitAssemblyHelper::CreatePasses(TargetMachine *TM) {
unsigned OptLevel = CodeGenOpts.OptimizationLevel;
- CodeGenOptions::InliningMethod Inlining = CodeGenOpts.Inlining;
+ CodeGenOptions::InliningMethod Inlining = CodeGenOpts.getInlining();
// Handle disabling of LLVM optimization, where we want to preserve the
// internal module before any optimization.
@@ -174,14 +205,14 @@ void EmitAssemblyHelper::CreatePasses() {
addBoundsCheckingPass);
}
- if (LangOpts.AddressSanitizer) {
- PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
+ if (LangOpts.SanitizeAddress) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
addAddressSanitizerPass);
PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
addAddressSanitizerPass);
}
- if (LangOpts.ThreadSanitizer) {
+ if (LangOpts.SanitizeThread) {
PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
addThreadSanitizerPass);
PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
@@ -218,38 +249,36 @@ void EmitAssemblyHelper::CreatePasses() {
break;
}
-
// Set up the per-function pass manager.
- FunctionPassManager *FPM = getPerFunctionPasses();
+ FunctionPassManager *FPM = getPerFunctionPasses(TM);
if (CodeGenOpts.VerifyModule)
FPM->add(createVerifierPass());
PMBuilder.populateFunctionPassManager(*FPM);
// Set up the per-module pass manager.
- PassManager *MPM = getPerModulePasses();
+ PassManager *MPM = getPerModulePasses(TM);
if (CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes) {
MPM->add(createGCOVProfilerPass(CodeGenOpts.EmitGcovNotes,
CodeGenOpts.EmitGcovArcs,
TargetTriple.isMacOSX()));
- if (CodeGenOpts.DebugInfo == CodeGenOptions::NoDebugInfo)
+ if (CodeGenOpts.getDebugInfo() == CodeGenOptions::NoDebugInfo)
MPM->add(createStripSymbolsPass(true));
}
-
-
+
PMBuilder.populateModulePassManager(*MPM);
}
-bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
- formatted_raw_ostream &OS) {
+TargetMachine *EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
// Create the TargetMachine for generating code.
std::string Error;
std::string Triple = TheModule->getTargetTriple();
const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error);
if (!TheTarget) {
- Diags.Report(diag::err_fe_unable_to_create_target) << Error;
- return false;
+ if (MustCreateTM)
+ Diags.Report(diag::err_fe_unable_to_create_target) << Error;
+ return 0;
}
// FIXME: Expose these capabilities via actual APIs!!!! Aside from just
@@ -361,7 +390,7 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
break;
case LangOptions::FPC_Fast:
Options.AllowFPOpFusion = llvm::FPOpFusion::Fast;
- break;
+ break;
}
Options.LessPreciseFPMADOption = CodeGenOpts.LessPreciseFPMAD;
@@ -375,6 +404,7 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
Options.DisableTailCalls = CodeGenOpts.DisableTailCalls;
Options.TrapFuncName = CodeGenOpts.TrapFuncName;
Options.PositionIndependentExecutable = LangOpts.PIELevel != 0;
+ Options.SSPBufferSize = CodeGenOpts.SSPBufferSize;
TargetMachine *TM = TheTarget->createTargetMachine(Triple, TargetOpts.CPU,
FeaturesStr, Options,
@@ -391,15 +421,27 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
if (CodeGenOpts.NoExecStack)
TM->setMCNoExecStack(true);
+ return TM;
+}
+
+bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
+ formatted_raw_ostream &OS,
+ TargetMachine *TM) {
+
// Create the code generator passes.
- PassManager *PM = getCodeGenPasses();
+ PassManager *PM = getCodeGenPasses(TM);
// Add LibraryInfo.
- TargetLibraryInfo *TLI = new TargetLibraryInfo();
+ llvm::Triple TargetTriple(TheModule->getTargetTriple());
+ TargetLibraryInfo *TLI = new TargetLibraryInfo(TargetTriple);
if (!CodeGenOpts.SimplifyLibCalls)
TLI->disableAllFunctions();
PM->add(TLI);
+ // Add TargetTransformInfo.
+ PM->add(new TargetTransformInfo(TM->getScalarTargetTransformInfo(),
+ TM->getVectorTargetTransformInfo()));
+
// Normal mode, emit a .s or .o file by running the code generator. Note,
// this also adds codegenerator level optimization passes.
TargetMachine::CodeGenFileType CGFT = TargetMachine::CGFT_AssemblyFile;
@@ -430,23 +472,28 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_ostream *OS) {
TimeRegion Region(llvm::TimePassesIsEnabled ? &CodeGenerationTime : 0);
llvm::formatted_raw_ostream FormattedOS;
- CreatePasses();
+ bool UsesCodeGen = (Action != Backend_EmitNothing &&
+ Action != Backend_EmitBC &&
+ Action != Backend_EmitLL);
+ TargetMachine *TM = CreateTargetMachine(UsesCodeGen);
+ CreatePasses(TM);
+
switch (Action) {
case Backend_EmitNothing:
break;
case Backend_EmitBC:
- getPerModulePasses()->add(createBitcodeWriterPass(*OS));
+ getPerModulePasses(TM)->add(createBitcodeWriterPass(*OS));
break;
case Backend_EmitLL:
FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM);
- getPerModulePasses()->add(createPrintModulePass(&FormattedOS));
+ getPerModulePasses(TM)->add(createPrintModulePass(&FormattedOS));
break;
default:
FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM);
- if (!AddEmitPasses(Action, FormattedOS))
+ if (!AddEmitPasses(Action, FormattedOS, TM))
return;
}
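All three pass-manager getters above now seed their pipelines identically: the module's DataLayout (formerly TargetData) first, then a TargetTransformInfo when a TargetMachine is available. A condensed sketch of that shared pattern (the helper name is an assumption for illustration; the calls themselves are the ones the patch adds):

  // Seed a pass manager with target information. TM may legitimately be
  // null, e.g. for -emit-llvm runs on triples not built into clang.
  static void addTargetPasses(llvm::PassManagerBase &PM, llvm::Module *M,
                              llvm::TargetMachine *TM) {
    PM.add(new llvm::DataLayout(M));
    if (TM)
      PM.add(new llvm::TargetTransformInfo(
          TM->getScalarTargetTransformInfo(),
          TM->getVectorTargetTransformInfo()));
  }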
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 37ef4af..6742f36 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -19,7 +19,7 @@
#include "clang/AST/DeclObjC.h"
#include "llvm/Module.h"
#include "llvm/ADT/SmallSet.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include <algorithm>
using namespace clang;
@@ -27,7 +27,8 @@ using namespace CodeGen;
CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name)
: Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
- HasCXXObject(false), UsesStret(false), StructureType(0), Block(block),
+ HasCXXObject(false), UsesStret(false), HasCapturedVariableLayout(false),
+ StructureType(0), Block(block),
DominatingIP(0) {
// Skip asm prefix, if any. 'name' is usually taken directly from
@@ -56,7 +57,18 @@ static llvm::Constant *buildDisposeHelper(CodeGenModule &CGM,
return CodeGenFunction(CGM).GenerateDestroyHelperFunction(blockInfo);
}
-/// Build the block descriptor constant for a block.
+/// buildBlockDescriptor - Build the block descriptor meta-data for a block.
+/// buildBlockDescriptor is accessed from the 5th field of the Block_literal
+/// meta-data and contains stationary information about the block literal.
+/// Its definition will have 4 (or optionally 6) words.
+/// struct Block_descriptor {
+/// unsigned long reserved;
+/// unsigned long size; // size of Block_literal metadata in bytes.
+/// void *copy_func_helper_decl; // optional copy helper.
+/// void *destroy_func_decl; // optional destructor helper.
+/// void *block_method_encoding_address; // @encode for block literal signature.
+/// void *block_layout_info; // encoding of captured block variables.
+/// };
static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
const CGBlockInfo &blockInfo) {
ASTContext &C = CGM.getContext();
@@ -92,8 +104,12 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
CGM.GetAddrOfConstantCString(typeAtEncoding), i8p));
// GC layout.
- if (C.getLangOpts().ObjC1)
- elements.push_back(CGM.getObjCRuntime().BuildGCBlockLayout(CGM, blockInfo));
+ if (C.getLangOpts().ObjC1) {
+ if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
+ elements.push_back(CGM.getObjCRuntime().BuildGCBlockLayout(CGM, blockInfo));
+ else
+ elements.push_back(CGM.getObjCRuntime().BuildRCBlockLayout(CGM, blockInfo));
+ }
else
elements.push_back(llvm::Constant::getNullValue(i8p));
@@ -293,7 +309,10 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
info.CanBeGlobal = true;
return;
}
-
+ else if (C.getLangOpts().ObjC1 &&
+ CGM.getLangOpts().getGC() == LangOptions::NonGC)
+ info.HasCapturedVariableLayout = true;
+
// Collect the layout chunks.
SmallVector<BlockLayoutChunk, 16> layout;
layout.reserve(block->capturesCXXThis() +
@@ -652,6 +671,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// Compute the initial on-stack block flags.
BlockFlags flags = BLOCK_HAS_SIGNATURE;
+ if (blockInfo.HasCapturedVariableLayout) flags |= BLOCK_HAS_EXTENDED_LAYOUT;
if (blockInfo.NeedsCopyDispose) flags |= BLOCK_HAS_COPY_DISPOSE;
if (blockInfo.HasCXXObject) flags |= BLOCK_HAS_CXX_OBJ;
if (blockInfo.UsesStret) flags |= BLOCK_USE_STRET;
@@ -1001,8 +1021,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
const BlockDecl *blockDecl = blockInfo.getBlockDecl();
// Check if we should generate debug info for this block function.
- if (CGM.getModuleDebugInfo())
- DebugInfo = CGM.getModuleDebugInfo();
+ maybeInitializeDebugInfo();
CurGD = GD;
BlockInfo = &blockInfo;
@@ -1135,7 +1154,8 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
const VarDecl *variable = ci->getVariable();
DI->EmitLocation(Builder, variable->getLocation());
- if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ if (CGM.getCodeGenOpts().getDebugInfo()
+ >= CodeGenOptions::LimitedDebugInfo) {
const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
if (capture.isConstant()) {
DI->EmitDeclareOfAutoVariable(variable, LocalDeclMap[variable],
@@ -1207,8 +1227,7 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
= &CGM.getContext().Idents.get("__copy_helper_block_");
// Check if we should generate debug info for this block helper function.
- if (CGM.getModuleDebugInfo())
- DebugInfo = CGM.getModuleDebugInfo();
+ maybeInitializeDebugInfo();
FunctionDecl *FD = FunctionDecl::Create(C,
C.getTranslationUnitDecl(),
@@ -1243,7 +1262,8 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
const Expr *copyExpr = ci->getCopyExpr();
BlockFieldFlags flags;
- bool isARCWeakCapture = false;
+ bool useARCWeakCopy = false;
+ bool useARCStrongCopy = false;
if (copyExpr) {
assert(!ci->isByRef());
@@ -1256,21 +1276,35 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
} else if (type->isObjCRetainableType()) {
flags = BLOCK_FIELD_IS_OBJECT;
- if (type->isBlockPointerType())
+ bool isBlockPointer = type->isBlockPointerType();
+ if (isBlockPointer)
flags = BLOCK_FIELD_IS_BLOCK;
// Special rules for ARC captures:
if (getLangOpts().ObjCAutoRefCount) {
Qualifiers qs = type.getQualifiers();
- // Don't generate special copy logic for a captured object
- // unless it's __strong or __weak.
- if (!qs.hasStrongOrWeakObjCLifetime())
+ // We need to register __weak direct captures with the runtime.
+ if (qs.getObjCLifetime() == Qualifiers::OCL_Weak) {
+ useARCWeakCopy = true;
+
+ // We need to retain the copied value for __strong direct captures.
+ } else if (qs.getObjCLifetime() == Qualifiers::OCL_Strong) {
+ // If it's a block pointer, we have to copy the block and
+ // assign that to the destination pointer, so we might as
+ // well use _Block_object_assign. Otherwise we can avoid that.
+ if (!isBlockPointer)
+ useARCStrongCopy = true;
+
+ // Otherwise the memcpy is fine.
+ } else {
continue;
+ }
- // Support __weak direct captures.
- if (qs.getObjCLifetime() == Qualifiers::OCL_Weak)
- isARCWeakCapture = true;
+ // Non-ARC captures of retainable pointers are strong and
+ // therefore require a call to _Block_object_assign.
+ } else {
+ // fall through
}
} else {
continue;
@@ -1283,14 +1317,36 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
// If there's an explicit copy expression, we do that.
if (copyExpr) {
EmitSynthesizedCXXCopyCtor(dstField, srcField, copyExpr);
- } else if (isARCWeakCapture) {
+ } else if (useARCWeakCopy) {
EmitARCCopyWeak(dstField, srcField);
} else {
llvm::Value *srcValue = Builder.CreateLoad(srcField, "blockcopy.src");
- srcValue = Builder.CreateBitCast(srcValue, VoidPtrTy);
- llvm::Value *dstAddr = Builder.CreateBitCast(dstField, VoidPtrTy);
- Builder.CreateCall3(CGM.getBlockObjectAssign(), dstAddr, srcValue,
- llvm::ConstantInt::get(Int32Ty, flags.getBitMask()));
+ if (useARCStrongCopy) {
+ // At -O0, store null into the destination field (so that the
+ // storeStrong doesn't over-release) and then call storeStrong.
+ // This is a workaround to not having an initStrong call.
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ llvm::PointerType *ty = cast<llvm::PointerType>(srcValue->getType());
+ llvm::Value *null = llvm::ConstantPointerNull::get(ty);
+ Builder.CreateStore(null, dstField);
+ EmitARCStoreStrongCall(dstField, srcValue, true);
+
+ // With optimization enabled, take advantage of the fact that
+ // the blocks runtime guarantees a memcpy of the block data, and
+ // just emit a retain of the src field.
+ } else {
+ EmitARCRetainNonBlock(srcValue);
+
+ // We don't need this anymore, so kill it. It's not quite
+ // worth the annoyance to avoid creating it in the first place.
+ cast<llvm::Instruction>(dstField)->eraseFromParent();
+ }
+ } else {
+ srcValue = Builder.CreateBitCast(srcValue, VoidPtrTy);
+ llvm::Value *dstAddr = Builder.CreateBitCast(dstField, VoidPtrTy);
+ Builder.CreateCall3(CGM.getBlockObjectAssign(), dstAddr, srcValue,
+ llvm::ConstantInt::get(Int32Ty, flags.getBitMask()));
+ }
}
}
@@ -1321,8 +1377,7 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
"__destroy_helper_block_", &CGM.getModule());
// Check if we should generate debug info for this block destroy function.
- if (CGM.getModuleDebugInfo())
- DebugInfo = CGM.getModuleDebugInfo();
+ maybeInitializeDebugInfo();
IdentifierInfo *II
= &CGM.getContext().Idents.get("__destroy_helper_block_");
@@ -1356,7 +1411,8 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
BlockFieldFlags flags;
const CXXDestructorDecl *dtor = 0;
- bool isARCWeakCapture = false;
+ bool useARCWeakDestroy = false;
+ bool useARCStrongDestroy = false;
if (ci->isByRef()) {
flags = BLOCK_FIELD_IS_BYREF;
@@ -1382,7 +1438,11 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
// Support __weak direct captures.
if (qs.getObjCLifetime() == Qualifiers::OCL_Weak)
- isARCWeakCapture = true;
+ useARCWeakDestroy = true;
+
+ // Tools really want us to use objc_storeStrong here.
+ else
+ useARCStrongDestroy = true;
}
} else {
continue;
@@ -1396,9 +1456,13 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
PushDestructorCleanup(dtor, srcField);
// If this is a __weak capture, emit the release directly.
- } else if (isARCWeakCapture) {
+ } else if (useARCWeakDestroy) {
EmitARCDestroyWeak(srcField);
+ // Destroy strong objects with a call if requested.
+ } else if (useARCStrongDestroy) {
+ EmitARCDestroyStrong(srcField, /*precise*/ false);
+
// Otherwise we call _Block_object_dispose. It wouldn't be too
// hard to just emit this as a cleanup if we wanted to make sure
// that things were done in reverse.
@@ -1497,10 +1561,7 @@ public:
}
void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
- llvm::LoadInst *value = CGF.Builder.CreateLoad(field);
- value->setAlignment(Alignment.getQuantity());
-
- CGF.EmitARCRelease(value, /*precise*/ false);
+ CGF.EmitARCDestroyStrong(field, /*precise*/ false);
}
void profileImpl(llvm::FoldingSetNodeID &id) const {
@@ -1530,10 +1591,7 @@ public:
}
void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
- llvm::LoadInst *value = CGF.Builder.CreateLoad(field);
- value->setAlignment(Alignment.getQuantity());
-
- CGF.EmitARCRelease(value, /*precise*/ false);
+ CGF.EmitARCDestroyStrong(field, /*precise*/ false);
}
void profileImpl(llvm::FoldingSetNodeID &id) const {
@@ -1612,6 +1670,8 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
SC_None,
false, false);
+ // Initialize debug info if necessary.
+ CGF.maybeInitializeDebugInfo();
CGF.StartFunction(FD, R, Fn, FI, args, SourceLocation());
if (byrefInfo.needsCopy()) {
@@ -1682,6 +1742,8 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
SC_Static,
SC_None,
false, false);
+ // Initialize debug info if necessary.
+ CGF.maybeInitializeDebugInfo();
CGF.StartFunction(FD, R, Fn, FI, args, SourceLocation());
if (byrefInfo.needsDispose()) {
@@ -1879,7 +1941,7 @@ llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
// And either 2 or 4 pointers.
CurrentOffsetInBytes += (HasCopyAndDispose ? 4 : 2) *
- CGM.getTargetData().getTypeAllocSize(Int8PtrTy);
+ CGM.getDataLayout().getTypeAllocSize(Int8PtrTy);
// Align the offset.
unsigned AlignedOffsetInBytes =
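The -O0 path for __strong captures above stores null into the destination field before calling the store-strong entry point, so the release half of that call operates on nil rather than on whatever bytes the block's memcpy left behind. In C-like pseudocode, the emitted sequence is roughly (illustrative only; objc_storeStrong is the ARC runtime function the call lowers to):

  dst->capture = nil;                            // old value is now safely releasable
  objc_storeStrong(&dst->capture, src->capture); // retains src, releases the nil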
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 095cfdb..f85701a 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -33,7 +33,7 @@ namespace llvm {
class Constant;
class Function;
class GlobalValue;
- class TargetData;
+ class DataLayout;
class FunctionType;
class PointerType;
class Value;
@@ -47,12 +47,24 @@ namespace CodeGen {
class CodeGenModule;
class CGBlockInfo;
-enum BlockFlag_t {
+// Flags stored in __block variables.
+enum BlockByrefFlags {
+ BLOCK_BYREF_HAS_COPY_DISPOSE = (1 << 25), // compiler
+ BLOCK_BYREF_LAYOUT_MASK = (0xF << 28), // compiler
+ BLOCK_BYREF_LAYOUT_EXTENDED = (1 << 28),
+ BLOCK_BYREF_LAYOUT_NON_OBJECT = (2 << 28),
+ BLOCK_BYREF_LAYOUT_STRONG = (3 << 28),
+ BLOCK_BYREF_LAYOUT_WEAK = (4 << 28),
+ BLOCK_BYREF_LAYOUT_UNRETAINED = (5 << 28)
+};
+
+enum BlockLiteralFlags {
BLOCK_HAS_COPY_DISPOSE = (1 << 25),
BLOCK_HAS_CXX_OBJ = (1 << 26),
BLOCK_IS_GLOBAL = (1 << 28),
BLOCK_USE_STRET = (1 << 29),
- BLOCK_HAS_SIGNATURE = (1 << 30)
+ BLOCK_HAS_SIGNATURE = (1 << 30),
+ BLOCK_HAS_EXTENDED_LAYOUT = (1 << 31)
};
class BlockFlags {
uint32_t flags;
@@ -60,7 +72,7 @@ class BlockFlags {
BlockFlags(uint32_t flags) : flags(flags) {}
public:
BlockFlags() : flags(0) {}
- BlockFlags(BlockFlag_t flag) : flags(flag) {}
+ BlockFlags(BlockLiteralFlags flag) : flags(flag) {}
uint32_t getBitMask() const { return flags; }
bool empty() const { return flags == 0; }
@@ -76,7 +88,7 @@ public:
return (l.flags & r.flags);
}
};
-inline BlockFlags operator|(BlockFlag_t l, BlockFlag_t r) {
+inline BlockFlags operator|(BlockLiteralFlags l, BlockLiteralFlags r) {
return BlockFlags(l) | BlockFlags(r);
}
@@ -182,6 +194,10 @@ public:
/// UsesStret : True if the block uses an stret return. Mutable
/// because it gets set later in the block-creation process.
mutable bool UsesStret : 1;
+
+ /// HasCapturedVariableLayout : True if the block has captured variables
+ /// and their layout meta-data has been generated.
+ bool HasCapturedVariableLayout : 1;
/// The mapping of allocated indexes within the block.
llvm::DenseMap<const VarDecl*, Capture> Captures;
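The new BlockByrefFlags pack a __block variable's lifetime layout into bits 28-31 of its flags word, next to the copy/dispose bit. A small decoding sketch (illustrative, not code from this patch):

  // Pull apart a __block variable's flags word with the new enum.
  bool hasHelpers  = (flags & BLOCK_BYREF_HAS_COPY_DISPOSE) != 0;
  uint32_t layout  = flags & BLOCK_BYREF_LAYOUT_MASK;      // 0xF << 28
  bool isWeakByref = (layout == BLOCK_BYREF_LAYOUT_WEAK);  // 4 << 28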
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 59ed313..e8c05d3 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -20,7 +20,7 @@
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/Intrinsics.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace clang;
using namespace CodeGen;
@@ -86,8 +86,7 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace =
- cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+ unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
llvm::IntegerType *IntType =
llvm::IntegerType::get(CGF.getLLVMContext(),
@@ -121,8 +120,7 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace =
- cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+ unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
llvm::IntegerType *IntType =
llvm::IntegerType::get(CGF.getLLVMContext(),
@@ -148,7 +146,7 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
const BuiltinType *ValTyP = ValTy->getAs<BuiltinType>();
assert(ValTyP && "isn't scalar fp type!");
-
+
StringRef FnName;
switch (ValTyP->getKind()) {
default: llvm_unreachable("Isn't a scalar fp type!");
@@ -156,7 +154,7 @@ static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
case BuiltinType::Double: FnName = "fabs"; break;
case BuiltinType::LongDouble: FnName = "fabsl"; break;
}
-
+
// The prototype is something that takes and returns whatever V's type is.
llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(),
false);
@@ -214,7 +212,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
DstPtr, SrcPtr));
}
- case Builtin::BI__builtin_abs:
+ case Builtin::BI__builtin_abs:
case Builtin::BI__builtin_labs:
case Builtin::BI__builtin_llabs: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
@@ -229,18 +227,18 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Result);
}
-
+
case Builtin::BI__builtin_conj:
case Builtin::BI__builtin_conjf:
case Builtin::BI__builtin_conjl: {
ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
Value *Real = ComplexVal.first;
Value *Imag = ComplexVal.second;
- Value *Zero =
- Imag->getType()->isFPOrFPVectorTy()
+ Value *Zero =
+ Imag->getType()->isFPOrFPVectorTy()
? llvm::ConstantFP::getZeroValueForNegation(Imag->getType())
: llvm::Constant::getNullValue(Imag->getType());
-
+
Imag = Builder.CreateFSub(Zero, Imag, "sub");
return RValue::getComplex(std::make_pair(Real, Imag));
}
@@ -250,14 +248,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
return RValue::get(ComplexVal.first);
}
-
+
case Builtin::BI__builtin_cimag:
case Builtin::BI__builtin_cimagf:
case Builtin::BI__builtin_cimagl: {
ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
return RValue::get(ComplexVal.second);
}
-
+
case Builtin::BI__builtin_ctzs:
case Builtin::BI__builtin_ctz:
case Builtin::BI__builtin_ctzl:
@@ -356,6 +354,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
"expval");
return RValue::get(Result);
}
+ case Builtin::BI__builtin_bswap16:
case Builtin::BI__builtin_bswap32:
case Builtin::BI__builtin_bswap64: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
@@ -371,15 +370,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// We pass this builtin onto the optimizer so that it can
// figure out the object size in more complex cases.
llvm::Type *ResType = ConvertType(E->getType());
-
+
// LLVM only supports 0 and 2, make sure that we pass along that
// as a boolean.
Value *Ty = EmitScalarExpr(E->getArg(1));
ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
assert(CI);
uint64_t val = CI->getZExtValue();
- CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
-
+ CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
+
Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType);
return RValue::get(Builder.CreateCall2(F, EmitScalarExpr(E->getArg(0)),CI));
}
@@ -402,9 +401,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *F = CGM.getIntrinsic(Intrinsic::trap);
return RValue::get(Builder.CreateCall(F));
}
+ case Builtin::BI__debugbreak: {
+ Value *F = CGM.getIntrinsic(Intrinsic::debugtrap);
+ return RValue::get(Builder.CreateCall(F));
+ }
case Builtin::BI__builtin_unreachable: {
- if (CatchUndefined)
- EmitBranch(getTrapBB());
+ if (getLangOpts().SanitizeUnreachable)
+ EmitCheck(Builder.getFalse(), "builtin_unreachable",
+ EmitCheckSourceLocation(E->getExprLoc()),
+ llvm::ArrayRef<llvm::Value *>());
else
Builder.CreateUnreachable();
@@ -413,7 +418,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(0);
}
-
+
case Builtin::BI__builtin_powi:
case Builtin::BI__builtin_powif:
case Builtin::BI__builtin_powil: {
@@ -464,16 +469,16 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
V = Builder.CreateFCmpUNO(V, V, "cmp");
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
-
+
case Builtin::BI__builtin_isinf: {
// isinf(x) --> fabs(x) == infinity
Value *V = EmitScalarExpr(E->getArg(0));
V = EmitFAbs(*this, V, E->getArg(0)->getType());
-
+
V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
-
+
// TODO: BI__builtin_isinf_sign
// isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0
@@ -499,11 +504,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// isfinite(x) --> x == x && fabs(x) != infinity;
Value *V = EmitScalarExpr(E->getArg(0));
Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
-
+
Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
Value *IsNotInf =
Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
-
+
V = Builder.CreateAnd(Eq, IsNotInf, "and");
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
@@ -565,7 +570,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Builder.SetInsertPoint(End);
return RValue::get(Result);
}
-
+
case Builtin::BIalloca:
case Builtin::BI__builtin_alloca: {
Value *Size = EmitScalarExpr(E->getArg(0));
@@ -573,85 +578,90 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
}
case Builtin::BIbzero:
case Builtin::BI__builtin_bzero: {
- Value *Address = EmitScalarExpr(E->getArg(0));
+ std::pair<llvm::Value*, unsigned> Dest =
+ EmitPointerWithAlignment(E->getArg(0));
Value *SizeVal = EmitScalarExpr(E->getArg(1));
- unsigned Align = GetPointeeAlignment(E->getArg(0));
- Builder.CreateMemSet(Address, Builder.getInt8(0), SizeVal, Align, false);
- return RValue::get(Address);
+ Builder.CreateMemSet(Dest.first, Builder.getInt8(0), SizeVal,
+ Dest.second, false);
+ return RValue::get(Dest.first);
}
case Builtin::BImemcpy:
case Builtin::BI__builtin_memcpy: {
- Value *Address = EmitScalarExpr(E->getArg(0));
- Value *SrcAddr = EmitScalarExpr(E->getArg(1));
+ std::pair<llvm::Value*, unsigned> Dest =
+ EmitPointerWithAlignment(E->getArg(0));
+ std::pair<llvm::Value*, unsigned> Src =
+ EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
- GetPointeeAlignment(E->getArg(1)));
- Builder.CreateMemCpy(Address, SrcAddr, SizeVal, Align, false);
- return RValue::get(Address);
+ unsigned Align = std::min(Dest.second, Src.second);
+ Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
+ return RValue::get(Dest.first);
}
-
+
case Builtin::BI__builtin___memcpy_chk: {
- // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
+ // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
llvm::APSInt Size, DstSize;
if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
!E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
break;
if (Size.ugt(DstSize))
break;
- Value *Dest = EmitScalarExpr(E->getArg(0));
- Value *Src = EmitScalarExpr(E->getArg(1));
+ std::pair<llvm::Value*, unsigned> Dest =
+ EmitPointerWithAlignment(E->getArg(0));
+ std::pair<llvm::Value*, unsigned> Src =
+ EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
- unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
- GetPointeeAlignment(E->getArg(1)));
- Builder.CreateMemCpy(Dest, Src, SizeVal, Align, false);
- return RValue::get(Dest);
+ unsigned Align = std::min(Dest.second, Src.second);
+ Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
+ return RValue::get(Dest.first);
}
-
+
case Builtin::BI__builtin_objc_memmove_collectable: {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *SrcAddr = EmitScalarExpr(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
Address, SrcAddr, SizeVal);
return RValue::get(Address);
}
case Builtin::BI__builtin___memmove_chk: {
- // fold __builtin_memmove_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
+ // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
llvm::APSInt Size, DstSize;
if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
!E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
break;
if (Size.ugt(DstSize))
break;
- Value *Dest = EmitScalarExpr(E->getArg(0));
- Value *Src = EmitScalarExpr(E->getArg(1));
+ std::pair<llvm::Value*, unsigned> Dest =
+ EmitPointerWithAlignment(E->getArg(0));
+ std::pair<llvm::Value*, unsigned> Src =
+ EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
- unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
- GetPointeeAlignment(E->getArg(1)));
- Builder.CreateMemMove(Dest, Src, SizeVal, Align, false);
- return RValue::get(Dest);
+ unsigned Align = std::min(Dest.second, Src.second);
+ Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
+ return RValue::get(Dest.first);
}
case Builtin::BImemmove:
case Builtin::BI__builtin_memmove: {
- Value *Address = EmitScalarExpr(E->getArg(0));
- Value *SrcAddr = EmitScalarExpr(E->getArg(1));
+ std::pair<llvm::Value*, unsigned> Dest =
+ EmitPointerWithAlignment(E->getArg(0));
+ std::pair<llvm::Value*, unsigned> Src =
+ EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
- GetPointeeAlignment(E->getArg(1)));
- Builder.CreateMemMove(Address, SrcAddr, SizeVal, Align, false);
- return RValue::get(Address);
+ unsigned Align = std::min(Dest.second, Src.second);
+ Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
+ return RValue::get(Dest.first);
}
case Builtin::BImemset:
case Builtin::BI__builtin_memset: {
- Value *Address = EmitScalarExpr(E->getArg(0));
+ std::pair<llvm::Value*, unsigned> Dest =
+ EmitPointerWithAlignment(E->getArg(0));
Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
Builder.getInt8Ty());
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- unsigned Align = GetPointeeAlignment(E->getArg(0));
- Builder.CreateMemSet(Address, ByteVal, SizeVal, Align, false);
- return RValue::get(Address);
+ Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
+ return RValue::get(Dest.first);
}
case Builtin::BI__builtin___memset_chk: {
// fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
@@ -661,14 +671,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
break;
if (Size.ugt(DstSize))
break;
- Value *Address = EmitScalarExpr(E->getArg(0));
+ std::pair<llvm::Value*, unsigned> Dest =
+ EmitPointerWithAlignment(E->getArg(0));
Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
Builder.getInt8Ty());
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
- unsigned Align = GetPointeeAlignment(E->getArg(0));
- Builder.CreateMemSet(Address, ByteVal, SizeVal, Align, false);
-
- return RValue::get(Address);
+ Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
+ return RValue::get(Dest.first);
}
case Builtin::BI__builtin_dwarf_cfa: {
// The offset in bytes from the first argument to the CFA.
@@ -682,7 +691,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
int32_t Offset = 0;
Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
- return RValue::get(Builder.CreateCall(F,
+ return RValue::get(Builder.CreateCall(F,
llvm::ConstantInt::get(Int32Ty, Offset)));
}
case Builtin::BI__builtin_return_address: {
@@ -907,9 +916,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_val_compare_and_swap_16: {
QualType T = E->getType();
llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace =
- cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
-
+ unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
+
llvm::IntegerType *IntType =
llvm::IntegerType::get(getLLVMContext(),
getContext().getTypeSize(T));
@@ -935,9 +943,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_bool_compare_and_swap_16: {
QualType T = E->getArg(1)->getType();
llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace =
- cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
-
+ unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
+
llvm::IntegerType *IntType =
llvm::IntegerType::get(getLLVMContext(),
getContext().getTypeSize(T));
@@ -982,7 +989,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
StoreSize.getQuantity() * 8);
Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
- llvm::StoreInst *Store =
+ llvm::StoreInst *Store =
Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
Store->setAlignment(StoreSize.getQuantity());
Store->setAtomic(llvm::Release);
@@ -993,7 +1000,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// We assume this is supposed to correspond to a C++0x-style
// sequentially-consistent fence (i.e. this is only usable for
// synchronization, not device I/O or anything like that). This intrinsic
- // is really badly designed in the sense that in theory, there isn't
+ // is really badly designed in the sense that in theory, there isn't
// any way to safely use it... but in practice, it mostly works
// to use it with non-atomic loads and stores to get acquire/release
// semantics.
@@ -1033,8 +1040,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
Value *Ptr = EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace =
- cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
Value *NewVal = Builder.getInt8(1);
Value *Order = EmitScalarExpr(E->getArg(1));
@@ -1120,8 +1126,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
Value *Ptr = EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace =
- cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
Value *NewVal = Builder.getInt8(0);
Value *Order = EmitScalarExpr(E->getArg(1));
@@ -1310,6 +1315,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
}
+ case Builtin::BI__noop:
+ return RValue::get(0);
}
// If this is an alias for a lib function (e.g. __builtin_sin), emit
@@ -1318,7 +1325,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
return emitLibraryCall(*this, FD, E,
CGM.getBuiltinLibFunction(FD, BuiltinID));
-
+
// If this is a predefined lib function (e.g. malloc), emit the call
// using exactly the normal call path.
if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
@@ -1350,7 +1357,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
if ((ICEArguments & (1 << i)) == 0) {
ArgValue = EmitScalarExpr(E->getArg(i));
} else {
- // If this is required to be a constant, constant fold it so that we
+ // If this is required to be a constant, constant fold it so that we
// know that the generated intrinsic gets a ConstantInt.
llvm::APSInt Result;
bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result,getContext());
@@ -1375,7 +1382,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
QualType BuiltinRetType = E->getType();
llvm::Type *RetTy = VoidTy;
- if (!BuiltinRetType->isVoidType())
+ if (!BuiltinRetType->isVoidType())
RetTy = ConvertType(BuiltinRetType);
if (RetTy != V->getType()) {
@@ -1457,10 +1464,10 @@ Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
return Builder.CreateCall(F, Ops, name);
}
-Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
+Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
bool neg) {
int SV = cast<ConstantInt>(V)->getSExtValue();
-
+
llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
return llvm::ConstantVector::getSplat(VTy->getNumElements(), C);
@@ -1469,34 +1476,56 @@ Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
/// GetPointeeAlignment - Given an expression with a pointer type, find the
/// alignment of the type referenced by the pointer. Skip over implicit
/// casts.
-unsigned CodeGenFunction::GetPointeeAlignment(const Expr *Addr) {
- unsigned Align = 1;
- // Check if the type is a pointer. The implicit cast operand might not be.
- while (Addr->getType()->isPointerType()) {
- QualType PtTy = Addr->getType()->getPointeeType();
-
- // Can't get alignment of incomplete types.
- if (!PtTy->isIncompleteType()) {
- unsigned NewA = getContext().getTypeAlignInChars(PtTy).getQuantity();
- if (NewA > Align)
- Align = NewA;
+std::pair<llvm::Value*, unsigned>
+CodeGenFunction::EmitPointerWithAlignment(const Expr *Addr) {
+ assert(Addr->getType()->isPointerType());
+ Addr = Addr->IgnoreParens();
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Addr)) {
+ if ((ICE->getCastKind() == CK_BitCast || ICE->getCastKind() == CK_NoOp) &&
+ ICE->getSubExpr()->getType()->isPointerType()) {
+ std::pair<llvm::Value*, unsigned> Ptr =
+ EmitPointerWithAlignment(ICE->getSubExpr());
+ Ptr.first = Builder.CreateBitCast(Ptr.first,
+ ConvertType(Addr->getType()));
+ return Ptr;
+ } else if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
+ LValue LV = EmitLValue(ICE->getSubExpr());
+ unsigned Align = LV.getAlignment().getQuantity();
+ if (!Align) {
+ // FIXME: Once LValues are fixed to always set alignment,
+ // zap this code.
+ QualType PtTy = ICE->getSubExpr()->getType();
+ if (!PtTy->isIncompleteType())
+ Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
+ else
+ Align = 1;
+ }
+ return std::make_pair(LV.getAddress(), Align);
}
-
- // If the address is an implicit cast, repeat with the cast operand.
- if (const ImplicitCastExpr *CastAddr = dyn_cast<ImplicitCastExpr>(Addr)) {
- Addr = CastAddr->getSubExpr();
- continue;
+ }
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Addr)) {
+ if (UO->getOpcode() == UO_AddrOf) {
+ LValue LV = EmitLValue(UO->getSubExpr());
+ unsigned Align = LV.getAlignment().getQuantity();
+ if (!Align) {
+ // FIXME: Once LValues are fixed to always set alignment,
+ // zap this code.
+ QualType PtTy = UO->getSubExpr()->getType();
+ if (!PtTy->isIncompleteType())
+ Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
+ else
+ Align = 1;
+ }
+ return std::make_pair(LV.getAddress(), Align);
}
- break;
}
- return Align;
-}
-/// GetPointeeAlignmentValue - Given an expression with a pointer type, find
-/// the alignment of the type referenced by the pointer. Skip over implicit
-/// casts. Return the alignment as an llvm::Value.
-Value *CodeGenFunction::GetPointeeAlignmentValue(const Expr *Addr) {
- return llvm::ConstantInt::get(Int32Ty, GetPointeeAlignment(Addr));
+ unsigned Align = 1;
+ QualType PtTy = Addr->getType()->getPointeeType();
+ if (!PtTy->isIncompleteType())
+ Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
+
+ return std::make_pair(EmitScalarExpr(Addr), Align);
}
Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
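EmitPointerWithAlignment replaces GetPointeeAlignment: instead of returning only a statically computed alignment, it emits the pointer value and pairs it with the best alignment it can prove, looking through no-op casts, array-to-pointer decay, and address-of expressions. Typical use mirrors the memcpy handler earlier in this patch:

  std::pair<llvm::Value*, unsigned> Dest = EmitPointerWithAlignment(E->getArg(0));
  std::pair<llvm::Value*, unsigned> Src  = EmitPointerWithAlignment(E->getArg(1));
  unsigned Align = std::min(Dest.second, Src.second); // conservative shared bound
  Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);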
@@ -1549,8 +1578,69 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
SmallVector<Value*, 4> Ops;
- for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
+ llvm::Value *Align = 0;
+ for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
+ if (i == 0) {
+ switch (BuiltinID) {
+ case ARM::BI__builtin_neon_vld1_v:
+ case ARM::BI__builtin_neon_vld1q_v:
+ case ARM::BI__builtin_neon_vld1q_lane_v:
+ case ARM::BI__builtin_neon_vld1_lane_v:
+ case ARM::BI__builtin_neon_vld1_dup_v:
+ case ARM::BI__builtin_neon_vld1q_dup_v:
+ case ARM::BI__builtin_neon_vst1_v:
+ case ARM::BI__builtin_neon_vst1q_v:
+ case ARM::BI__builtin_neon_vst1q_lane_v:
+ case ARM::BI__builtin_neon_vst1_lane_v:
+ case ARM::BI__builtin_neon_vst2_v:
+ case ARM::BI__builtin_neon_vst2q_v:
+ case ARM::BI__builtin_neon_vst2_lane_v:
+ case ARM::BI__builtin_neon_vst2q_lane_v:
+ case ARM::BI__builtin_neon_vst3_v:
+ case ARM::BI__builtin_neon_vst3q_v:
+ case ARM::BI__builtin_neon_vst3_lane_v:
+ case ARM::BI__builtin_neon_vst3q_lane_v:
+ case ARM::BI__builtin_neon_vst4_v:
+ case ARM::BI__builtin_neon_vst4q_v:
+ case ARM::BI__builtin_neon_vst4_lane_v:
+ case ARM::BI__builtin_neon_vst4q_lane_v:
+ // Get the alignment for the argument in addition to the value;
+ // we'll use it later.
+ std::pair<llvm::Value*, unsigned> Src =
+ EmitPointerWithAlignment(E->getArg(0));
+ Ops.push_back(Src.first);
+ Align = Builder.getInt32(Src.second);
+ continue;
+ }
+ }
+ if (i == 1) {
+ switch (BuiltinID) {
+ case ARM::BI__builtin_neon_vld2_v:
+ case ARM::BI__builtin_neon_vld2q_v:
+ case ARM::BI__builtin_neon_vld3_v:
+ case ARM::BI__builtin_neon_vld3q_v:
+ case ARM::BI__builtin_neon_vld4_v:
+ case ARM::BI__builtin_neon_vld4q_v:
+ case ARM::BI__builtin_neon_vld2_lane_v:
+ case ARM::BI__builtin_neon_vld2q_lane_v:
+ case ARM::BI__builtin_neon_vld3_lane_v:
+ case ARM::BI__builtin_neon_vld3q_lane_v:
+ case ARM::BI__builtin_neon_vld4_lane_v:
+ case ARM::BI__builtin_neon_vld4q_lane_v:
+ case ARM::BI__builtin_neon_vld2_dup_v:
+ case ARM::BI__builtin_neon_vld3_dup_v:
+ case ARM::BI__builtin_neon_vld4_dup_v:
+ // Get the alignment for the argument in addition to the value;
+ // we'll use it later.
+ std::pair<llvm::Value*, unsigned> Src =
+ EmitPointerWithAlignment(E->getArg(1));
+ Ops.push_back(Src.first);
+ Align = Builder.getInt32(Src.second);
+ continue;
+ }
+ }
Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ }
// vget_lane and vset_lane are not overloaded and do not have an extra
// argument that specifies the vector type.
@@ -1596,7 +1686,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ty = FloatTy;
else
Ty = DoubleTy;
-
+
// Determine whether this is an unsigned conversion or not.
bool usgn = Result.getZExtValue() == 1;
unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
@@ -1605,7 +1695,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Int, Ty);
return Builder.CreateCall(F, Ops, "vcvtr");
}
-
+
// Determine the type of this overloaded NEON intrinsic.
NeonTypeFlags Type(Result.getZExtValue());
bool usgn = Type.isUnsigned();
@@ -1620,6 +1710,10 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
unsigned Int;
switch (BuiltinID) {
default: return 0;
+ case ARM::BI__builtin_neon_vbsl_v:
+ case ARM::BI__builtin_neon_vbslq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vbsl, Ty),
+ Ops, "vbsl");
case ARM::BI__builtin_neon_vabd_v:
case ARM::BI__builtin_neon_vabdq_v:
Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
@@ -1690,7 +1784,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vcvtq_f32_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
- return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
+ return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
case ARM::BI__builtin_neon_vcvt_s32_v:
case ARM::BI__builtin_neon_vcvt_u32_v:
@@ -1699,7 +1793,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
llvm::Type *FloatTy =
GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
- return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
+ return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
: Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
}
case ARM::BI__builtin_neon_vcvt_n_f32_v:
@@ -1730,7 +1824,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
SmallVector<Constant*, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
Indices.push_back(ConstantInt::get(Int32Ty, i+CV));
-
+
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Value *SV = llvm::ConstantVector::get(Indices);
@@ -1746,7 +1840,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhsub");
case ARM::BI__builtin_neon_vld1_v:
case ARM::BI__builtin_neon_vld1q_v:
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ Ops.push_back(Align);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty),
Ops, "vld1");
case ARM::BI__builtin_neon_vld1q_lane_v:
@@ -1761,8 +1855,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Load the value as a one-element vector.
Ty = llvm::VectorType::get(VTy->getElementType(), 1);
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty);
- Value *Ld = Builder.CreateCall2(F, Ops[0],
- GetPointeeAlignmentValue(E->getArg(0)));
+ Value *Ld = Builder.CreateCall2(F, Ops[0], Align);
// Combine them.
SmallVector<Constant*, 2> Indices;
Indices.push_back(ConstantInt::get(Int32Ty, 1-Lane));
@@ -1776,7 +1869,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
LoadInst *Ld = Builder.CreateLoad(Ops[0]);
- Value *Align = GetPointeeAlignmentValue(E->getArg(0));
Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
}
@@ -1786,7 +1878,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
LoadInst *Ld = Builder.CreateLoad(Ops[0]);
- Value *Align = GetPointeeAlignmentValue(E->getArg(0));
Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
@@ -1795,7 +1886,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vld2_v:
case ARM::BI__builtin_neon_vld2q_v: {
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, Ty);
- Value *Align = GetPointeeAlignmentValue(E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1804,7 +1894,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vld3_v:
case ARM::BI__builtin_neon_vld3q_v: {
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, Ty);
- Value *Align = GetPointeeAlignmentValue(E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1813,7 +1902,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vld4_v:
case ARM::BI__builtin_neon_vld4q_v: {
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, Ty);
- Value *Align = GetPointeeAlignmentValue(E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1824,7 +1912,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(1)));
+ Ops.push_back(Align);
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1836,7 +1924,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(1)));
+ Ops.push_back(Align);
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1849,7 +1937,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(1)));
+ Ops.push_back(Align);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
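
The hunks above all replace per-case calls to GetPointeeAlignmentValue with a single Align value. A minimal sketch of the hoisted computation they rely on, assuming it sits just before the builtin switch (the exact placement is outside this excerpt; names are those used in the surrounding diff):

    // Hedged sketch: Align is derived once, from whichever argument is the
    // pointer (arg 0 for vld1/vst*, arg 1 for the multi-vector loads).
    Value *Align = 0;
    switch (BuiltinID) {
    default: break;
    case ARM::BI__builtin_neon_vld1_v:
    case ARM::BI__builtin_neon_vst1_v:
      Align = GetPointeeAlignmentValue(E->getArg(0));
      break;
    case ARM::BI__builtin_neon_vld2_v:
    case ARM::BI__builtin_neon_vld3_v:
    case ARM::BI__builtin_neon_vld4_v:
      Align = GetPointeeAlignmentValue(E->getArg(1));
      break;
    }
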
@@ -1861,47 +1949,46 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Handle 64-bit elements as a special-case. There is no "dup" needed.
if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
switch (BuiltinID) {
- case ARM::BI__builtin_neon_vld2_dup_v:
- Int = Intrinsic::arm_neon_vld2;
+ case ARM::BI__builtin_neon_vld2_dup_v:
+ Int = Intrinsic::arm_neon_vld2;
break;
case ARM::BI__builtin_neon_vld3_dup_v:
- Int = Intrinsic::arm_neon_vld3;
+ Int = Intrinsic::arm_neon_vld3;
break;
case ARM::BI__builtin_neon_vld4_dup_v:
- Int = Intrinsic::arm_neon_vld4;
+ Int = Intrinsic::arm_neon_vld4;
break;
default: llvm_unreachable("unknown vld_dup intrinsic?");
}
Function *F = CGM.getIntrinsic(Int, Ty);
- Value *Align = GetPointeeAlignmentValue(E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
}
switch (BuiltinID) {
- case ARM::BI__builtin_neon_vld2_dup_v:
- Int = Intrinsic::arm_neon_vld2lane;
+ case ARM::BI__builtin_neon_vld2_dup_v:
+ Int = Intrinsic::arm_neon_vld2lane;
break;
case ARM::BI__builtin_neon_vld3_dup_v:
- Int = Intrinsic::arm_neon_vld3lane;
+ Int = Intrinsic::arm_neon_vld3lane;
break;
case ARM::BI__builtin_neon_vld4_dup_v:
- Int = Intrinsic::arm_neon_vld4lane;
+ Int = Intrinsic::arm_neon_vld4lane;
break;
default: llvm_unreachable("unknown vld_dup intrinsic?");
}
Function *F = CGM.getIntrinsic(Int, Ty);
llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
-
+
SmallVector<Value*, 6> Args;
Args.push_back(Ops[1]);
Args.append(STy->getNumElements(), UndefValue::get(Ty));
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
Args.push_back(CI);
- Args.push_back(GetPointeeAlignmentValue(E->getArg(1)));
-
+ Args.push_back(Align);
+
Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
// splat lane 0 to all elts in each vector of the result.
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
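
For the lane-based vld_dup path above, each returned vector is loaded into lane 0 and then splatted across all lanes by the shuffle loop. A hedged source-level trigger (function name is illustrative):

    #include <arm_neon.h>
    // Loads one uint8x8 pair at p and duplicates it to every lane;
    // lowers to vld2lane followed by per-vector splat shuffles.
    uint8x8x2_t dup_pair(const uint8_t *p) {
      return vld2_dup_u8(p);
    }
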
@@ -1944,6 +2031,14 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
+ case ARM::BI__builtin_neon_vfma_v:
+ case ARM::BI__builtin_neon_vfmaq_v: {
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ return Builder.CreateCall3(F, Ops[0], Ops[1], Ops[2]);
+ }
case ARM::BI__builtin_neon_vpadal_v:
case ARM::BI__builtin_neon_vpadalq_v: {
Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
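
The new vfma cases route the NEON fused multiply-accumulate builtins through the target-independent llvm.fma intrinsic rather than a NEON-specific one. A hedged source-level view, assuming a target with NEON FMA support (the wrapper name is illustrative):

    #include <arm_neon.h>
    // vfmaq_f32 now selects through llvm.fma.v4f32.
    float32x4_t fused(float32x4_t acc, float32x4_t x, float32x4_t y) {
      return vfmaq_f32(acc, x, y);
    }
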
@@ -2016,7 +2111,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshl");
case ARM::BI__builtin_neon_vqrshrn_n_v:
- Int = usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
+ Int =
+ usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
1, true);
case ARM::BI__builtin_neon_vqrshrun_n_v:
@@ -2086,7 +2182,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
- Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
+ Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
case ARM::BI__builtin_neon_vrsubhn_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, Ty),
@@ -2101,7 +2197,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vshl_n_v:
case ARM::BI__builtin_neon_vshlq_n_v:
Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
- return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1], "vshl_n");
+ return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
+ "vshl_n");
case ARM::BI__builtin_neon_vshrn_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, Ty),
Ops, "vshrn_n", 1, true);
@@ -2133,7 +2230,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateAdd(Ops[0], Ops[1]);
case ARM::BI__builtin_neon_vst1_v:
case ARM::BI__builtin_neon_vst1q_v:
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ Ops.push_back(Align);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst1q_lane_v:
@@ -2143,7 +2240,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
- Ops[2] = GetPointeeAlignmentValue(E->getArg(0));
+ Ops[2] = Align;
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
Ops[1]->getType()), Ops);
}
@@ -2154,38 +2251,37 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
StoreInst *St = Builder.CreateStore(Ops[1],
Builder.CreateBitCast(Ops[0], Ty));
- Value *Align = GetPointeeAlignmentValue(E->getArg(0));
St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
return St;
}
case ARM::BI__builtin_neon_vst2_v:
case ARM::BI__builtin_neon_vst2q_v:
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ Ops.push_back(Align);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst2_lane_v:
case ARM::BI__builtin_neon_vst2q_lane_v:
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ Ops.push_back(Align);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst3_v:
case ARM::BI__builtin_neon_vst3q_v:
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ Ops.push_back(Align);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst3_lane_v:
case ARM::BI__builtin_neon_vst3q_lane_v:
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ Ops.push_back(Align);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst4_v:
case ARM::BI__builtin_neon_vst4q_v:
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ Ops.push_back(Align);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst4_lane_v:
case ARM::BI__builtin_neon_vst4q_lane_v:
- Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ Ops.push_back(Align);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, Ty),
Ops, "");
case ARM::BI__builtin_neon_vsubhn_v:
@@ -2220,7 +2316,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
- Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
+ Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
ConstantAggregateZero::get(Ty));
return Builder.CreateSExt(Ops[0], Ty, "vtst");
}
@@ -2250,7 +2346,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = 0;
-
+
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
@@ -2263,13 +2359,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
return SV;
}
- case ARM::BI__builtin_neon_vzip_v:
+ case ARM::BI__builtin_neon_vzip_v:
case ARM::BI__builtin_neon_vzipq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = 0;
-
+
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
@@ -2382,62 +2478,62 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
case X86::BI__builtin_ia32_palignr: {
unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
-
+
// If palignr is shifting the pair of input vectors less than 9 bytes,
// emit a shuffle instruction.
if (shiftVal <= 8) {
SmallVector<llvm::Constant*, 8> Indices;
for (unsigned i = 0; i != 8; ++i)
Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
-
+
Value* SV = llvm::ConstantVector::get(Indices);
return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
}
-
+
// If palignr is shifting the pair of input vectors more than 8 but less
// than 16 bytes, emit a logical right shift of the destination.
if (shiftVal < 16) {
// MMX has these as 1 x i64 vectors for some odd optimization reasons.
llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);
-
+
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);
-
+
      // Emit the shift through the MMX psrl.q intrinsic.
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
}
-
+
// If palignr is shifting the pair of vectors more than 16 bytes, emit zero.
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
case X86::BI__builtin_ia32_palignr128: {
unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
-
+
// If palignr is shifting the pair of input vectors less than 17 bytes,
// emit a shuffle instruction.
if (shiftVal <= 16) {
SmallVector<llvm::Constant*, 16> Indices;
for (unsigned i = 0; i != 16; ++i)
Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
-
+
Value* SV = llvm::ConstantVector::get(Indices);
return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
}
-
+
// If palignr is shifting the pair of input vectors more than 16 but less
// than 32 bytes, emit a logical right shift of the destination.
if (shiftVal < 32) {
llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
-
+
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);
-
+
      // Emit the shift through the SSE2 psrl.dq intrinsic.
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
}
-
+
// If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
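
The palignr lowering above splits into three regimes by byte shift count. A hedged illustration of the 128-bit variant using the SSSE3 intrinsic, where the immediate is the byte count (function names are illustrative):

    #include <tmmintrin.h>
    __m128i r_shuffle(__m128i hi, __m128i lo) {
      return _mm_alignr_epi8(hi, lo, 4);    // <= 16: one shufflevector
    }
    __m128i r_shift(__m128i hi, __m128i lo) {
      return _mm_alignr_epi8(hi, lo, 24);   // 17..31: psrl.dq of hi
    }
    __m128i r_zero(__m128i hi, __m128i lo) {
      return _mm_alignr_epi8(hi, lo, 40);   // >= 32: constant zero
    }
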
diff --git a/lib/CodeGen/CGCXXABI.cpp b/lib/CodeGen/CGCXXABI.cpp
index aba5d75..91795b9 100644
--- a/lib/CodeGen/CGCXXABI.cpp
+++ b/lib/CodeGen/CGCXXABI.cpp
@@ -189,7 +189,7 @@ void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *ptr,
llvm::Value *&numElements,
llvm::Value *&allocPtr, CharUnits &cookieSize) {
// Derive a char* in the same address space as the pointer.
- unsigned AS = cast<llvm::PointerType>(ptr->getType())->getAddressSpace();
+ unsigned AS = ptr->getType()->getPointerAddressSpace();
llvm::Type *charPtrTy = CGF.Int8Ty->getPointerTo(AS);
ptr = CGF.Builder.CreateBitCast(ptr, charPtrTy);
diff --git a/lib/CodeGen/CGCXXABI.h b/lib/CodeGen/CGCXXABI.h
index a0dcdfd..570aeb0 100644
--- a/lib/CodeGen/CGCXXABI.h
+++ b/lib/CodeGen/CGCXXABI.h
@@ -154,6 +154,15 @@ protected:
llvm::Constant *getMemberPointerAdjustment(const CastExpr *E);
public:
+ /// Adjust the given non-null pointer to an object of polymorphic
+ /// type to point to the complete object.
+ ///
+ /// The IR type of the result should be a pointer but is otherwise
+ /// irrelevant.
+ virtual llvm::Value *adjustToCompleteObject(CodeGenFunction &CGF,
+ llvm::Value *ptr,
+ QualType type) = 0;
+
/// Build the signature of the given constructor variant by adding
/// any required parameters. For convenience, ResTy has been
/// initialized to 'void', and ArgTys has been initialized with the
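
A hedged source-level motivation for the new adjustToCompleteObject hook: converting a polymorphic pointer to void* must yield the most-derived object, and each C++ ABI computes that adjustment differently.

    struct Base { virtual ~Base(); };
    void *complete(Base *b) {
      // The result points at the complete object, not the Base subobject.
      return dynamic_cast<void *>(b);
    }
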
@@ -196,6 +205,9 @@ public:
/// Gets the pure virtual member call function.
virtual StringRef GetPureVirtualCallName() = 0;
+ /// Gets the deleted virtual member call name.
+ virtual StringRef GetDeletedVirtualCallName() = 0;
+
/**************************** Array cookies ******************************/
/// Returns the extra size required in order to store the array
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 7d2b9d3..2d1d152 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -25,7 +25,7 @@
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/InlineAsm.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
@@ -148,6 +148,9 @@ static CallingConv getCallingConventionForDecl(const Decl *D) {
if (PcsAttr *PCS = D->getAttr<PcsAttr>())
return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
+ if (D->hasAttr<PnaclCallAttr>())
+ return CC_PnaclCall;
+
return CC_C;
}
@@ -588,9 +591,9 @@ EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
// If the first elt is at least as large as what we're looking for, or if the
// first element is the same size as the whole struct, we can enter it.
uint64_t FirstEltSize =
- CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
+ CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
if (FirstEltSize < DstSize &&
- FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
+ FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
return SrcPtr;
// GEP into the first element.
@@ -653,14 +656,14 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
if (SrcTy == Ty)
return CGF.Builder.CreateLoad(SrcPtr);
- uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
+ uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
}
- uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+ uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
// If the source and destination are integer or pointer types, just do an
// extension or truncation to the desired type.
@@ -740,7 +743,7 @@ static void CreateCoercedStore(llvm::Value *Src,
return;
}
- uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+ uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
@@ -756,7 +759,7 @@ static void CreateCoercedStore(llvm::Value *Src,
return;
}
- uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
+ uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
// If store is legal, just bitcast the src pointer.
if (SrcSize <= DstSize) {
@@ -864,6 +867,10 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
ie = FI.arg_end(); it != ie; ++it) {
const ABIArgInfo &argAI = it->info;
+ // Insert a padding type to ensure proper alignment.
+ if (llvm::Type *PaddingType = argAI.getPaddingType())
+ argTypes.push_back(PaddingType);
+
switch (argAI.getKind()) {
case ABIArgInfo::Ignore:
break;
@@ -877,9 +884,6 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
- // Insert a padding type to ensure proper alignment.
- if (llvm::Type *PaddingType = argAI.getPaddingType())
- argTypes.push_back(PaddingType);
// If the coerce-to type is a first class aggregate, flatten it. Either
// way is semantically identical, but fast-isel and the optimizer
// generally likes scalar values better than FCAs.
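
Hoisting the padding push out of the Direct/Extend case means every ABI kind now participates in the same protocol; the later hunks keep the other two argument walks in step with it. A hedged restatement of the three-way contract, quoting only calls that appear in this patch:

    // Signature construction adds the slot:
    if (llvm::Type *PaddingType = argAI.getPaddingType())
      argTypes.push_back(PaddingType);
    // EmitFunctionProlog skips the dummy argument (++AI), and EmitCall
    // fills the slot with undef so the real argument lands at the right
    // offset: Args.push_back(llvm::UndefValue::get(PaddingType)); ++IRArgNo;
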
@@ -924,50 +928,52 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
const Decl *TargetDecl,
AttributeListType &PAL,
unsigned &CallingConv) {
- llvm::Attributes FuncAttrs;
- llvm::Attributes RetAttrs;
+ llvm::AttrBuilder FuncAttrs;
+ llvm::AttrBuilder RetAttrs;
CallingConv = FI.getEffectiveCallingConvention();
if (FI.isNoReturn())
- FuncAttrs |= llvm::Attribute::NoReturn;
+ FuncAttrs.addAttribute(llvm::Attributes::NoReturn);
// FIXME: handle sseregparm someday...
if (TargetDecl) {
if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
- FuncAttrs |= llvm::Attribute::ReturnsTwice;
+ FuncAttrs.addAttribute(llvm::Attributes::ReturnsTwice);
if (TargetDecl->hasAttr<NoThrowAttr>())
- FuncAttrs |= llvm::Attribute::NoUnwind;
+ FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
if (FPT && FPT->isNothrow(getContext()))
- FuncAttrs |= llvm::Attribute::NoUnwind;
+ FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
}
if (TargetDecl->hasAttr<NoReturnAttr>())
- FuncAttrs |= llvm::Attribute::NoReturn;
+ FuncAttrs.addAttribute(llvm::Attributes::NoReturn);
if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
- FuncAttrs |= llvm::Attribute::ReturnsTwice;
+ FuncAttrs.addAttribute(llvm::Attributes::ReturnsTwice);
// 'const' and 'pure' attribute functions are also nounwind.
if (TargetDecl->hasAttr<ConstAttr>()) {
- FuncAttrs |= llvm::Attribute::ReadNone;
- FuncAttrs |= llvm::Attribute::NoUnwind;
+ FuncAttrs.addAttribute(llvm::Attributes::ReadNone);
+ FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
} else if (TargetDecl->hasAttr<PureAttr>()) {
- FuncAttrs |= llvm::Attribute::ReadOnly;
- FuncAttrs |= llvm::Attribute::NoUnwind;
+ FuncAttrs.addAttribute(llvm::Attributes::ReadOnly);
+ FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
}
if (TargetDecl->hasAttr<MallocAttr>())
- RetAttrs |= llvm::Attribute::NoAlias;
+ RetAttrs.addAttribute(llvm::Attributes::NoAlias);
}
if (CodeGenOpts.OptimizeSize)
- FuncAttrs |= llvm::Attribute::OptimizeForSize;
+ FuncAttrs.addAttribute(llvm::Attributes::OptimizeForSize);
+ if (CodeGenOpts.OptimizeSize == 2)
+ FuncAttrs.addAttribute(llvm::Attributes::MinSize);
if (CodeGenOpts.DisableRedZone)
- FuncAttrs |= llvm::Attribute::NoRedZone;
+ FuncAttrs.addAttribute(llvm::Attributes::NoRedZone);
if (CodeGenOpts.NoImplicitFloat)
- FuncAttrs |= llvm::Attribute::NoImplicitFloat;
+ FuncAttrs.addAttribute(llvm::Attributes::NoImplicitFloat);
QualType RetTy = FI.getReturnType();
unsigned Index = 1;
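
The mechanical change in this hunk is the move from bitmask-style llvm::Attributes to llvm::AttrBuilder. A minimal sketch of the new idiom, using only calls that appear in the patch:

    llvm::AttrBuilder B;
    B.addAttribute(llvm::Attributes::NoUnwind);
    B.addAttribute(llvm::Attributes::ReadOnly);
    // A builder is materialized against a context before being attached.
    llvm::Attributes A = llvm::Attributes::get(getLLVMContext(), B);
    PAL.push_back(llvm::AttributeWithIndex::get(Index, A));
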
@@ -975,24 +981,28 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
switch (RetAI.getKind()) {
case ABIArgInfo::Extend:
if (RetTy->hasSignedIntegerRepresentation())
- RetAttrs |= llvm::Attribute::SExt;
+ RetAttrs.addAttribute(llvm::Attributes::SExt);
else if (RetTy->hasUnsignedIntegerRepresentation())
- RetAttrs |= llvm::Attribute::ZExt;
+ RetAttrs.addAttribute(llvm::Attributes::ZExt);
break;
case ABIArgInfo::Direct:
case ABIArgInfo::Ignore:
break;
case ABIArgInfo::Indirect: {
- llvm::Attributes SRETAttrs = llvm::Attribute::StructRet;
+ llvm::AttrBuilder SRETAttrs;
+ SRETAttrs.addAttribute(llvm::Attributes::StructRet);
if (RetAI.getInReg())
- SRETAttrs |= llvm::Attribute::InReg;
- PAL.push_back(llvm::AttributeWithIndex::get(Index, SRETAttrs));
+ SRETAttrs.addAttribute(llvm::Attributes::InReg);
+ PAL.push_back(llvm::
+ AttributeWithIndex::get(Index,
+ llvm::Attributes::get(getLLVMContext(),
+ SRETAttrs)));
++Index;
// sret disables readnone and readonly
- FuncAttrs &= ~(llvm::Attribute::ReadOnly |
- llvm::Attribute::ReadNone);
+ FuncAttrs.removeAttribute(llvm::Attributes::ReadOnly)
+ .removeAttribute(llvm::Attributes::ReadNone);
break;
}
@@ -1000,14 +1010,29 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
llvm_unreachable("Invalid ABI kind for return argument");
}
- if (RetAttrs)
- PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
+ if (RetAttrs.hasAttributes())
+ PAL.push_back(llvm::
+ AttributeWithIndex::get(llvm::AttrListPtr::ReturnIndex,
+ llvm::Attributes::get(getLLVMContext(),
+ RetAttrs)));
for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
ie = FI.arg_end(); it != ie; ++it) {
QualType ParamType = it->type;
const ABIArgInfo &AI = it->info;
- llvm::Attributes Attrs;
+ llvm::AttrBuilder Attrs;
+
+ if (AI.getPaddingType()) {
+ if (AI.getPaddingInReg()) {
+ llvm::AttrBuilder PadAttrs;
+ PadAttrs.addAttribute(llvm::Attributes::InReg);
+
+        llvm::Attributes A = llvm::Attributes::get(getLLVMContext(), PadAttrs);
+ PAL.push_back(llvm::AttributeWithIndex::get(Index, A));
+ }
+ // Increment Index if there is padding.
+ ++Index;
+ }
// 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
// have the corresponding parameter variable. It doesn't make
@@ -1015,38 +1040,40 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
switch (AI.getKind()) {
case ABIArgInfo::Extend:
if (ParamType->isSignedIntegerOrEnumerationType())
- Attrs |= llvm::Attribute::SExt;
+ Attrs.addAttribute(llvm::Attributes::SExt);
else if (ParamType->isUnsignedIntegerOrEnumerationType())
- Attrs |= llvm::Attribute::ZExt;
+ Attrs.addAttribute(llvm::Attributes::ZExt);
// FALL THROUGH
case ABIArgInfo::Direct:
if (AI.getInReg())
- Attrs |= llvm::Attribute::InReg;
+ Attrs.addAttribute(llvm::Attributes::InReg);
// FIXME: handle sseregparm someday...
- // Increment Index if there is padding.
- Index += (AI.getPaddingType() != 0);
-
if (llvm::StructType *STy =
dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
unsigned Extra = STy->getNumElements()-1; // 1 will be added below.
- if (Attrs != llvm::Attribute::None)
+ if (Attrs.hasAttributes())
for (unsigned I = 0; I < Extra; ++I)
- PAL.push_back(llvm::AttributeWithIndex::get(Index + I, Attrs));
+ PAL.push_back(llvm::AttributeWithIndex::get(Index + I,
+ llvm::Attributes::get(getLLVMContext(),
+ Attrs)));
Index += Extra;
}
break;
case ABIArgInfo::Indirect:
+ if (AI.getInReg())
+ Attrs.addAttribute(llvm::Attributes::InReg);
+
if (AI.getIndirectByVal())
- Attrs |= llvm::Attribute::ByVal;
+ Attrs.addAttribute(llvm::Attributes::ByVal);
+
+ Attrs.addAlignmentAttr(AI.getIndirectAlign());
- Attrs |=
- llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
// byval disables readnone and readonly.
- FuncAttrs &= ~(llvm::Attribute::ReadOnly |
- llvm::Attribute::ReadNone);
+ FuncAttrs.removeAttribute(llvm::Attributes::ReadOnly)
+ .removeAttribute(llvm::Attributes::ReadNone);
break;
case ABIArgInfo::Ignore:
@@ -1064,12 +1091,17 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
}
}
- if (Attrs)
- PAL.push_back(llvm::AttributeWithIndex::get(Index, Attrs));
+ if (Attrs.hasAttributes())
+ PAL.push_back(llvm::AttributeWithIndex::get(Index,
+ llvm::Attributes::get(getLLVMContext(),
+ Attrs)));
++Index;
}
- if (FuncAttrs)
- PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
+ if (FuncAttrs.hasAttributes())
+ PAL.push_back(llvm::
+ AttributeWithIndex::get(llvm::AttrListPtr::FunctionIndex,
+ llvm::Attributes::get(getLLVMContext(),
+ FuncAttrs)));
}
/// An argument came in as a promoted argument; demote it back to its
@@ -1117,7 +1149,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// Name the struct return argument.
if (CGM.ReturnTypeUsesSRet(FI)) {
AI->setName("agg.result");
- AI->addAttr(llvm::Attribute::NoAlias);
+ AI->addAttr(llvm::Attributes::get(getLLVMContext(),
+ llvm::Attributes::NoAlias));
++AI;
}
@@ -1134,6 +1167,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
bool isPromoted =
isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
+ // Skip the dummy padding argument.
+ if (ArgI.getPaddingType())
+ ++AI;
+
switch (ArgI.getKind()) {
case ABIArgInfo::Indirect: {
llvm::Value *V = AI;
@@ -1175,9 +1212,6 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
- // Skip the dummy padding argument.
- if (ArgI.getPaddingType())
- ++AI;
// If we have the trivial case, handle it with no muss and fuss.
if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
@@ -1187,7 +1221,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::Value *V = AI;
if (Arg->getType().isRestrictQualified())
- AI->addAttr(llvm::Attribute::NoAlias);
+ AI->addAttr(llvm::Attributes::get(getLLVMContext(),
+ llvm::Attributes::NoAlias));
// Ensure the argument is the correct type.
if (V->getType() != ArgI.getCoerceToType())
@@ -1205,7 +1240,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// The alignment we need to use is the max of the requested alignment for
// the argument plus the alignment required by our access code below.
unsigned AlignmentToUse =
- CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
+ CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
AlignmentToUse = std::max(AlignmentToUse,
(unsigned)getContext().getDeclAlign(Arg).getQuantity());
@@ -1226,10 +1261,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// and the optimizer generally likes scalar values better than FCAs.
llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
if (STy && STy->getNumElements() > 1) {
- uint64_t SrcSize = CGM.getTargetData().getTypeAllocSize(STy);
+ uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
llvm::Type *DstTy =
cast<llvm::PointerType>(Ptr->getType())->getElementType();
- uint64_t DstSize = CGM.getTargetData().getTypeAllocSize(DstTy);
+ uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
if (SrcSize <= DstSize) {
Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
@@ -1363,12 +1398,23 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
.objc_retainAutoreleasedReturnValue) {
doRetainAutorelease = false;
- // Look for an inline asm immediately preceding the call and kill it, too.
- llvm::Instruction *prev = call->getPrevNode();
- if (llvm::CallInst *asmCall = dyn_cast_or_null<llvm::CallInst>(prev))
- if (asmCall->getCalledValue()
- == CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker)
- insnsToKill.push_back(prev);
+ // If we emitted an assembly marker for this call (and the
+ // ARCEntrypoints field should have been set if so), go looking
+ // for that call. If we can't find it, we can't do this
+ // optimization. But it should always be the immediately previous
+ // instruction, unless we needed bitcasts around the call.
+ if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
+ llvm::Instruction *prev = call->getPrevNode();
+ assert(prev);
+ if (isa<llvm::BitCastInst>(prev)) {
+ prev = prev->getPrevNode();
+ assert(prev);
+ }
+ assert(isa<llvm::CallInst>(prev));
+ assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
+ CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
+ insnsToKill.push_back(prev);
+ }
} else {
return 0;
}
@@ -1755,7 +1801,7 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
QualType type) {
if (const ObjCIndirectCopyRestoreExpr *CRE
= dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
- assert(getContext().getLangOpts().ObjCAutoRefCount);
+ assert(getLangOpts().ObjCAutoRefCount);
assert(getContext().hasSameType(E->getType(), type));
return emitWritebackArg(*this, args, CRE);
}
@@ -1943,6 +1989,13 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
unsigned TypeAlign =
getContext().getTypeAlignInChars(I->Ty).getQuantity();
+
+ // Insert a padding argument to ensure proper alignment.
+ if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
+ Args.push_back(llvm::UndefValue::get(PaddingType));
+ ++IRArgNo;
+ }
+
switch (ArgInfo.getKind()) {
case ABIArgInfo::Indirect: {
if (RV.isScalar() || RV.isComplex()) {
@@ -1969,7 +2022,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// we cannot force it to be sufficiently aligned.
llvm::Value *Addr = RV.getAggregateAddr();
unsigned Align = ArgInfo.getIndirectAlign();
- const llvm::TargetData *TD = &CGM.getTargetData();
+ const llvm::DataLayout *TD = &CGM.getDataLayout();
if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
(ArgInfo.getIndirectByVal() && TypeAlign < Align &&
llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
@@ -1998,12 +2051,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
- // Insert a padding argument to ensure proper alignment.
- if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
- Args.push_back(llvm::UndefValue::get(PaddingType));
- ++IRArgNo;
- }
-
if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
ArgInfo.getDirectOffset() == 0) {
@@ -2049,8 +2096,25 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// and the optimizer generally likes scalar values better than FCAs.
if (llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
- SrcPtr = Builder.CreateBitCast(SrcPtr,
- llvm::PointerType::getUnqual(STy));
+ llvm::Type *SrcTy =
+ cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
+ uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
+
+ // If the source type is smaller than the destination type of the
+ // coerce-to logic, copy the source value into a temp alloca the size
+ // of the destination type to allow loading all of it. The bits past
+ // the source value are left undef.
+ if (SrcSize < DstSize) {
+ llvm::AllocaInst *TempAlloca
+ = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
+ Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
+ SrcPtr = TempAlloca;
+ } else {
+ SrcPtr = Builder.CreateBitCast(SrcPtr,
+ llvm::PointerType::getUnqual(STy));
+ }
+
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
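
The new SrcSize < DstSize guard closes an out-of-bounds read. A plausible C-level trigger, hedged because the coercion type is target-ABI specific: on x86-64 a 12-byte aggregate can be coerced to { i64, i32 }, whose alloc size is 16 bytes, so loading both elements straight from the original 12-byte alloca would read past its end.

    struct S { int a, b, c; };              /* sizeof(S) == 12 */
    void callee(struct S s);
    void caller(struct S *p) { callee(*p); }
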
@@ -2113,10 +2177,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
unsigned CallingConv;
CodeGen::AttributeListType AttributeList;
CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
- llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList);
+ llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(getLLVMContext(),
+ AttributeList);
llvm::BasicBlock *InvokeDest = 0;
- if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
+ if (!Attrs.getFnAttributes().hasAttribute(llvm::Attributes::NoUnwind))
InvokeDest = getInvokeDest();
llvm::CallSite CS;
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index e37fa3a..b2225e4 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -542,12 +542,6 @@ namespace {
};
}
-static bool hasTrivialCopyOrMoveConstructor(const CXXRecordDecl *Record,
- bool Moving) {
- return Moving ? Record->hasTrivialMoveConstructor() :
- Record->hasTrivialCopyConstructor();
-}
-
static void EmitMemberInitializer(CodeGenFunction &CGF,
const CXXRecordDecl *ClassDecl,
CXXCtorInitializer *MemberInit,
@@ -588,12 +582,11 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
if (Array && Constructor->isImplicitlyDefined() &&
Constructor->isCopyOrMoveConstructor()) {
QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
- const CXXRecordDecl *Record = BaseElementTy->getAsCXXRecordDecl();
+ CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
if (BaseElementTy.isPODType(CGF.getContext()) ||
- (Record && hasTrivialCopyOrMoveConstructor(Record,
- Constructor->isMoveConstructor()))) {
- // Find the source pointer. We knows it's the last argument because
- // we know we're in a copy constructor.
+ (CE && CE->getConstructor()->isTrivial())) {
+ // Find the source pointer. We know it's the last argument because
+ // we know we're in an implicit copy constructor.
unsigned SrcArgIndex = Args.size() - 1;
llvm::Value *SrcPtr
= CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
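
The rewritten guard keys the memcpy fast path off the member's construct-expression being trivial rather than off properties of the record as a whole. A hedged example that still takes the fast path (type names are illustrative):

    struct Elt { int v; };            // trivially copyable element
    struct Holder {
      Elt arr[16];                    // array member
    };
    // The implicitly-defined copy constructor copies arr with one memcpy
    // instead of an element-wise construction loop.
    Holder clone(const Holder &h) { return Holder(h); }
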
@@ -952,8 +945,8 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
}
// -fapple-kext must inline any call to this dtor into
// the caller's body.
- if (getContext().getLangOpts().AppleKext)
- CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
+ if (getLangOpts().AppleKext)
+ CurFn->addFnAttr(llvm::Attributes::AlwaysInline);
break;
}
@@ -1238,7 +1231,7 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
CGDebugInfo *DI = getDebugInfo();
if (DI &&
- CGM.getCodeGenOpts().DebugInfo == CodeGenOptions::LimitedDebugInfo) {
+ CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::LimitedDebugInfo) {
// If debug info for this class has not been emitted then this is the
// right time to do so.
const CXXRecordDecl *Parent = D->getParent();
@@ -1268,7 +1261,9 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(D, Type), ForVirtualBase);
llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
- EmitCXXMemberCall(D, Callee, ReturnValueSlot(), This, VTT, ArgBeg, ArgEnd);
+ // FIXME: Provide a source location here.
+ EmitCXXMemberCall(D, SourceLocation(), Callee, ReturnValueSlot(), This,
+ VTT, ArgBeg, ArgEnd);
}
void
@@ -1413,14 +1408,16 @@ void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(DD, Type),
ForVirtualBase);
llvm::Value *Callee = 0;
- if (getContext().getLangOpts().AppleKext)
+ if (getLangOpts().AppleKext)
Callee = BuildAppleKextVirtualDestructorCall(DD, Type,
DD->getParent());
if (!Callee)
Callee = CGM.GetAddrOfCXXDestructor(DD, Type);
- EmitCXXMemberCall(DD, Callee, ReturnValueSlot(), This, VTT, 0, 0);
+ // FIXME: Provide a source location here.
+ EmitCXXMemberCall(DD, SourceLocation(), Callee, ReturnValueSlot(), This,
+ VTT, 0, 0);
}
namespace {
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index fd1c7a3..80fa09b 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -15,6 +15,7 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGBlocks.h"
+#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
@@ -34,7 +35,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace clang;
using namespace clang::CodeGen;
@@ -157,7 +158,7 @@ StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
OS << OID->getName();
} else if (const ObjCCategoryImplDecl *OCD =
dyn_cast<const ObjCCategoryImplDecl>(DC)){
- OS << ((NamedDecl *)OCD)->getIdentifier()->getNameStart() << '(' <<
+ OS << ((const NamedDecl *)OCD)->getIdentifier()->getNameStart() << '(' <<
OCD->getIdentifier()->getNameStart() << ')';
}
OS << ' ' << OMD->getSelector().getAsString() << ']';
@@ -254,9 +255,13 @@ unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
return PLoc.isValid()? PLoc.getLine() : 0;
}
-/// getColumnNumber - Get column number for the location. If location is
-/// invalid then use current location.
+/// getColumnNumber - Get column number for the location.
unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc) {
+ // We may not want column information at all.
+ if (!CGM.getCodeGenOpts().DebugColumnInfo)
+ return 0;
+
+ // If the location is invalid then use the current column.
if (Loc.isInvalid() && CurLoc.isInvalid())
return 0;
SourceManager &SM = CGM.getContext().getSourceManager();
@@ -347,44 +352,60 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
llvm_unreachable("Unexpected builtin type");
case BuiltinType::NullPtr:
return DBuilder.
- createNullPtrType(BT->getName(CGM.getContext().getLangOpts()));
+ createNullPtrType(BT->getName(CGM.getLangOpts()));
case BuiltinType::Void:
return llvm::DIType();
case BuiltinType::ObjCClass:
- return DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
- "objc_class", TheCU,
- getOrCreateMainFile(), 0);
+ if (ClassTy.Verify())
+ return ClassTy;
+ ClassTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_class", TheCU,
+ getOrCreateMainFile(), 0);
+ return ClassTy;
case BuiltinType::ObjCId: {
// typedef struct objc_class *Class;
// typedef struct objc_object {
// Class isa;
// } *id;
- // TODO: Cache these two types to avoid duplicates.
- llvm::DIType OCTy =
- DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
- "objc_class", TheCU, getOrCreateMainFile(), 0);
+ if (ObjTy.Verify())
+ return ObjTy;
+
+ if (!ClassTy.Verify())
+ ClassTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_class", TheCU,
+ getOrCreateMainFile(), 0);
+
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
- llvm::DIType ISATy = DBuilder.createPointerType(OCTy, Size);
+ llvm::DIType ISATy = DBuilder.createPointerType(ClassTy, Size);
+
+ llvm::DIType FwdTy = DBuilder.createStructType(TheCU, "objc_object",
+ getOrCreateMainFile(),
+ 0, 0, 0, 0,
+ llvm::DIArray());
- SmallVector<llvm::Value *, 16> EltTys;
+ llvm::TrackingVH<llvm::MDNode> ObjNode(FwdTy);
+ SmallVector<llvm::Value *, 1> EltTys;
llvm::DIType FieldTy =
- DBuilder.createMemberType(getOrCreateMainFile(), "isa",
+ DBuilder.createMemberType(llvm::DIDescriptor(ObjNode), "isa",
getOrCreateMainFile(), 0, Size,
0, 0, 0, ISATy);
EltTys.push_back(FieldTy);
llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
-
- return DBuilder.createStructType(TheCU, "objc_object",
- getOrCreateMainFile(),
- 0, 0, 0, 0, Elements);
+
+ ObjNode->replaceOperandWith(10, Elements);
+ ObjTy = llvm::DIType(ObjNode);
+ return ObjTy;
}
case BuiltinType::ObjCSel: {
- return
+ if (SelTy.Verify())
+ return SelTy;
+ SelTy =
DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
"objc_selector", TheCU, getOrCreateMainFile(),
0);
+ return SelTy;
}
case BuiltinType::UChar:
case BuiltinType::Char_U: Encoding = llvm::dwarf::DW_ATE_unsigned_char; break;
@@ -417,7 +438,7 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
case BuiltinType::ULong: BTName = "long unsigned int"; break;
case BuiltinType::ULongLong: BTName = "long long unsigned int"; break;
default:
- BTName = BT->getName(CGM.getContext().getLangOpts());
+ BTName = BT->getName(CGM.getLangOpts());
break;
}
// Bit size, align and offset of the type.
@@ -498,21 +519,17 @@ llvm::DIType CGDebugInfo::createRecordFwdDecl(const RecordDecl *RD,
llvm::DIDescriptor Ctx) {
llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
unsigned Line = getLineNumber(RD->getLocation());
- StringRef RDName = RD->getName();
+ StringRef RDName = getClassName(RD);
- // Get the tag.
- const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
unsigned Tag = 0;
- if (CXXDecl) {
- RDName = getClassName(RD);
- Tag = llvm::dwarf::DW_TAG_class_type;
- }
- else if (RD->isStruct())
+ if (RD->isStruct() || RD->isInterface())
Tag = llvm::dwarf::DW_TAG_structure_type;
else if (RD->isUnion())
Tag = llvm::dwarf::DW_TAG_union_type;
- else
- llvm_unreachable("Unknown RecordDecl type!");
+ else {
+ assert(RD->isClass());
+ Tag = llvm::dwarf::DW_TAG_class_type;
+ }
// Create the type.
return DBuilder.createForwardDecl(Tag, RDName, Ctx, DefUnit, Line);
@@ -550,7 +567,7 @@ llvm::DIDescriptor CGDebugInfo::createContextChain(const Decl *Context) {
/// then emit record's fwd if debug info size reduction is enabled.
llvm::DIType CGDebugInfo::CreatePointeeType(QualType PointeeTy,
llvm::DIFile Unit) {
- if (CGM.getCodeGenOpts().DebugInfo != CodeGenOptions::LimitedDebugInfo)
+ if (CGM.getCodeGenOpts().getDebugInfo() != CodeGenOptions::LimitedDebugInfo)
return getOrCreateType(PointeeTy, Unit);
// Limit debug info for the pointee type.
@@ -777,8 +794,6 @@ CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
for (CXXRecordDecl::capture_const_iterator I = CXXDecl->captures_begin(),
E = CXXDecl->captures_end(); I != E; ++I, ++Field, ++fieldno) {
const LambdaExpr::Capture C = *I;
- // TODO: Need to handle 'this' in some way by probably renaming the
- // this of the lambda class and having a field member of 'this'.
if (C.capturesVariable()) {
VarDecl *V = C.getCapturedVar();
llvm::DIFile VUnit = getOrCreateFile(C.getLocation());
@@ -793,10 +808,24 @@ CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
Field->getAccess(), layout.getFieldOffset(fieldno),
VUnit, RecordTy);
elements.push_back(fieldType);
+ } else {
+ // TODO: Need to handle 'this' in some way by probably renaming the
+ // this of the lambda class and having a field member of 'this' or
+ // by using AT_object_pointer for the function and having that be
+ // used as 'this' for semantic references.
+ assert(C.capturesThis() && "Field that isn't captured and isn't this?");
+ FieldDecl *f = *Field;
+ llvm::DIFile VUnit = getOrCreateFile(f->getLocation());
+ QualType type = f->getType();
+ llvm::DIType fieldType
+ = createFieldType("this", type, 0, f->getLocation(), f->getAccess(),
+                              layout.getFieldOffset(fieldno), VUnit, RecordTy);
+
+ elements.push_back(fieldType);
}
}
} else {
- bool IsMsStruct = record->hasAttr<MsStructAttr>();
+ bool IsMsStruct = record->isMsStruct(CGM.getContext());
const FieldDecl *LastFD = 0;
for (RecordDecl::field_iterator I = record->field_begin(),
E = record->field_end();
@@ -875,12 +904,12 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
// TODO: This and the artificial type below are misleading, the
// types aren't artificial the argument is, but the current
// metadata doesn't represent that.
- ThisPtrType = DBuilder.createArtificialType(ThisPtrType);
+ ThisPtrType = DBuilder.createObjectPointerType(ThisPtrType);
Elts.push_back(ThisPtrType);
} else {
llvm::DIType ThisPtrType = getOrCreateType(ThisPtr, Unit);
TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
- ThisPtrType = DBuilder.createArtificialType(ThisPtrType);
+ ThisPtrType = DBuilder.createObjectPointerType(ThisPtrType);
Elts.push_back(ThisPtrType);
}
}
@@ -995,12 +1024,8 @@ CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
if (D->isImplicit() && !D->isUsed())
continue;
- if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
- // Only emit debug information for user provided functions, we're
- // unlikely to want info for artificial functions.
- if (Method->isUserProvided())
- EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
- }
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
+ EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
else if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(D))
for (FunctionTemplateDecl::spec_iterator SI = FTD->spec_begin(),
SE = FTD->spec_end(); SI != SE; ++SI)
@@ -1182,7 +1207,7 @@ CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
/// getOrCreateRecordType - Emit record type's standalone debug info.
llvm::DIType CGDebugInfo::getOrCreateRecordType(QualType RTy,
SourceLocation Loc) {
- assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
+ assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
llvm::DIType T = getOrCreateType(RTy, getOrCreateFile(Loc));
return T;
}
@@ -1191,7 +1216,7 @@ llvm::DIType CGDebugInfo::getOrCreateRecordType(QualType RTy,
/// debug info.
llvm::DIType CGDebugInfo::getOrCreateInterfaceType(QualType D,
SourceLocation Loc) {
- assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
+ assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
llvm::DIType T = getOrCreateType(D, getOrCreateFile(Loc));
DBuilder.retainType(T);
return T;
@@ -1388,12 +1413,21 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
FieldAlign = CGM.getContext().getTypeAlign(FType);
}
- // We can't know the offset of our ivar in the structure if we're using
- // the non-fragile abi and the debugger should ignore the value anyways.
- // Call it the FieldNo+1 due to how debuggers use the information,
- // e.g. negating the value when it needs a lookup in the dynamic table.
- uint64_t FieldOffset = CGM.getLangOpts().ObjCRuntime.isNonFragile()
- ? FieldNo+1 : RL.getFieldOffset(FieldNo);
+ uint64_t FieldOffset;
+ if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) {
+ // We don't know the runtime offset of an ivar if we're using the
+ // non-fragile ABI. For bitfields, use the bit offset into the first
+ // byte of storage of the bitfield. For other fields, use zero.
+ if (Field->isBitField()) {
+ FieldOffset = CGM.getObjCRuntime().ComputeBitfieldBitOffset(
+ CGM, ID, Field);
+ FieldOffset %= CGM.getContext().getCharWidth();
+ } else {
+ FieldOffset = 0;
+ }
+ } else {
+ FieldOffset = RL.getFieldOffset(FieldNo);
+ }
unsigned Flags = 0;
if (Field->getAccessControl() == ObjCIvarDecl::Protected)
@@ -1570,9 +1604,29 @@ llvm::DIType CGDebugInfo::CreateType(const AtomicType *Ty,
/// CreateEnumType - get enumeration type.
llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED) {
- SmallVector<llvm::Value *, 16> Enumerators;
+ uint64_t Size = 0;
+ uint64_t Align = 0;
+ if (!ED->getTypeForDecl()->isIncompleteType()) {
+ Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
+ Align = CGM.getContext().getTypeAlign(ED->getTypeForDecl());
+ }
+
+ // If this is just a forward declaration, construct an appropriately
+ // marked node and just return it.
+ if (!ED->getDefinition()) {
+ llvm::DIDescriptor EDContext;
+ EDContext = getContextDescriptor(cast<Decl>(ED->getDeclContext()));
+ llvm::DIFile DefUnit = getOrCreateFile(ED->getLocation());
+ unsigned Line = getLineNumber(ED->getLocation());
+ StringRef EDName = ED->getName();
+ return DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_enumeration_type,
+ EDName, EDContext, DefUnit, Line, 0,
+ Size, Align);
+ }
// Create DIEnumerator elements for each enumerator.
+ SmallVector<llvm::Value *, 16> Enumerators;
+ ED = ED->getDefinition();
for (EnumDecl::enumerator_iterator
Enum = ED->enumerator_begin(), EnumEnd = ED->enumerator_end();
Enum != EnumEnd; ++Enum) {
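
The new early return handles enums that are only forward-declared. A hedged C++ trigger: an opaque enum declaration fixes the underlying type, so size and alignment are known, but provides no enumerators to describe.

    enum class Opaque : int;   // opaque declaration, no definition
    Opaque *op;                // debug info now gets a marked forward decl
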
@@ -1586,21 +1640,14 @@ llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED) {
llvm::DIFile DefUnit = getOrCreateFile(ED->getLocation());
unsigned Line = getLineNumber(ED->getLocation());
- uint64_t Size = 0;
- uint64_t Align = 0;
- if (!ED->getTypeForDecl()->isIncompleteType()) {
- Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
- Align = CGM.getContext().getTypeAlign(ED->getTypeForDecl());
- }
llvm::DIDescriptor EnumContext =
getContextDescriptor(cast<Decl>(ED->getDeclContext()));
llvm::DIType ClassTy = ED->isScopedUsingClassTag() ?
getOrCreateType(ED->getIntegerType(), DefUnit) : llvm::DIType();
- unsigned Flags = !ED->isCompleteDefinition() ? llvm::DIDescriptor::FlagFwdDecl : 0;
llvm::DIType DbgTy =
DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit, Line,
Size, Align, EltArray,
- ClassTy, Flags);
+ ClassTy);
return DbgTy;
}
@@ -1838,10 +1885,10 @@ llvm::DIType CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
// Get overall information about the record type for the debug info.
llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
unsigned Line = getLineNumber(RD->getLocation());
- StringRef RDName = RD->getName();
+ StringRef RDName = getClassName(RD);
llvm::DIDescriptor RDContext;
- if (CGM.getCodeGenOpts().DebugInfo == CodeGenOptions::LimitedDebugInfo)
+ if (CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::LimitedDebugInfo)
RDContext = createContextChain(cast<Decl>(RD->getDeclContext()));
else
RDContext = getContextDescriptor(cast<Decl>(RD->getDeclContext()));
@@ -1859,9 +1906,7 @@ llvm::DIType CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
if (RD->isUnion())
RealDecl = DBuilder.createUnionType(RDContext, RDName, DefUnit, Line,
Size, Align, 0, llvm::DIArray());
- else if (CXXDecl) {
- RDName = getClassName(RD);
-
+ else if (RD->isClass()) {
// FIXME: This could be a struct type giving a default visibility different
// than C++ class type, but needs llvm metadata changes first.
RealDecl = DBuilder.createClassType(RDContext, RDName, DefUnit, Line,
@@ -1969,7 +2014,7 @@ llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
// getOrCreateFunctionType - Construct DIType. If it is a c++ method, include
// implicit parameter "this".
-llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl * D,
+llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl *D,
QualType FnType,
llvm::DIFile F) {
@@ -1982,9 +2027,11 @@ llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl * D,
// First element is always return type. For 'void' functions it is NULL.
Elts.push_back(getOrCreateType(OMethod->getResultType(), F));
// "self" pointer is always first argument.
- Elts.push_back(getOrCreateType(OMethod->getSelfDecl()->getType(), F));
- // "cmd" pointer is always second argument.
- Elts.push_back(getOrCreateType(OMethod->getCmdDecl()->getType(), F));
+ llvm::DIType SelfTy = getOrCreateType(OMethod->getSelfDecl()->getType(), F);
+ Elts.push_back(DBuilder.createObjectPointerType(SelfTy));
+ // "_cmd" pointer is always second argument.
+ llvm::DIType CmdTy = getOrCreateType(OMethod->getCmdDecl()->getType(), F);
+ Elts.push_back(DBuilder.createArtificialType(CmdTy));
// Get rest of the arguments.
for (ObjCMethodDecl::param_const_iterator PI = OMethod->param_begin(),
PE = OMethod->param_end(); PI != PE; ++PI)
@@ -2007,14 +2054,22 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
FnBeginRegionCount.push_back(LexicalBlockStack.size());
const Decl *D = GD.getDecl();
+ // Function may lack declaration in source code if it is created by Clang
+ // CodeGen (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
+ bool HasDecl = (D != 0);
// Use the location of the declaration.
- SourceLocation Loc = D->getLocation();
-
+ SourceLocation Loc;
+ if (HasDecl)
+ Loc = D->getLocation();
+
unsigned Flags = 0;
llvm::DIFile Unit = getOrCreateFile(Loc);
llvm::DIDescriptor FDContext(Unit);
llvm::DIArray TParamsArray;
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (!HasDecl) {
+ // Use llvm function name.
+ Name = Fn->getName();
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// If there is a DISubprogram for this function available then use it.
llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
FI = SPCache.find(FD->getCanonicalDecl());
@@ -2035,10 +2090,10 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
Flags |= llvm::DIDescriptor::FlagPrototyped;
}
if (LinkageName == Name ||
- CGM.getCodeGenOpts().DebugInfo <= CodeGenOptions::DebugLineTablesOnly)
+ CGM.getCodeGenOpts().getDebugInfo() <= CodeGenOptions::DebugLineTablesOnly)
LinkageName = StringRef();
- if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ if (CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo) {
if (const NamespaceDecl *NSDecl =
dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
FDContext = getOrCreateNameSpace(NSDecl);
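
The HasDecl plumbing covers functions synthesized by CodeGen itself, which carry no AST declaration to hang a location or type on. A hedged trigger (the stub's exact symbol name is target- and version-dependent):

    struct Init { Init(); };
    Init g;   // the emitted _GLOBAL__I_a-style init stub is now named via
              // Fn->getName() and flagged artificial
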
@@ -2061,12 +2116,13 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
Name = Name.substr(1);
unsigned LineNo = getLineNumber(Loc);
- if (D->isImplicit())
+ if (!HasDecl || D->isImplicit())
Flags |= llvm::DIDescriptor::FlagArtificial;
llvm::DIType DIFnType;
llvm::DISubprogram SPDecl;
- if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ if (HasDecl &&
+ CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo) {
DIFnType = getOrCreateFunctionType(D, FnType, Unit);
SPDecl = getFunctionDeclaration(D);
} else {
@@ -2089,7 +2145,8 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
// Push function on region stack.
llvm::MDNode *SPN = SP;
LexicalBlockStack.push_back(SPN);
- RegionMap[D] = llvm::WeakVH(SP);
+ if (HasDecl)
+ RegionMap[D] = llvm::WeakVH(SP);
}
/// EmitLocation - Emit metadata to indicate a change in line/column
@@ -2242,7 +2299,7 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
llvm::Value *Storage,
unsigned ArgNo, CGBuilderTy &Builder) {
- assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
+ assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
@@ -2253,7 +2310,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
else
Ty = getOrCreateType(VD->getType(), Unit);
- // If there is not any debug info for type then do not emit debug info
+ // If there is no debug info for this type then do not emit debug info
// for this variable.
if (!Ty)
return;
@@ -2279,8 +2336,16 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
unsigned Flags = 0;
if (VD->isImplicit())
Flags |= llvm::DIDescriptor::FlagArtificial;
+ // If this is the first argument and it is implicit then
+ // give it an object pointer flag.
+ // FIXME: There has to be a better way to do this, but for static
+ // functions there won't be an implicit param at arg1 and
+ // otherwise it is 'self' or 'this'.
+ if (isa<ImplicitParamDecl>(VD) && ArgNo == 1)
+ Flags |= llvm::DIDescriptor::FlagObjectPointer;
+
llvm::MDNode *Scope = LexicalBlockStack.back();
-
+
StringRef Name = VD->getName();
if (!Name.empty()) {
if (VD->hasAttr<BlocksAttr>()) {
@@ -2376,14 +2441,15 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
llvm::Value *Storage,
CGBuilderTy &Builder) {
- assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
+ assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
EmitDeclare(VD, llvm::dwarf::DW_TAG_auto_variable, Storage, 0, Builder);
}
-void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
- const VarDecl *VD, llvm::Value *Storage, CGBuilderTy &Builder,
- const CGBlockInfo &blockInfo) {
- assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
+void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(const VarDecl *VD,
+ llvm::Value *Storage,
+ CGBuilderTy &Builder,
+ const CGBlockInfo &blockInfo) {
+ assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
if (Builder.GetInsertBlock() == 0)
@@ -2399,11 +2465,16 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
else
Ty = getOrCreateType(VD->getType(), Unit);
+ // Self is passed along as an implicit non-arg variable in a
+ // block. Mark it as the object pointer.
+ if (isa<ImplicitParamDecl>(VD) && VD->getName() == "self")
+ Ty = DBuilder.createObjectPointerType(Ty);
+
// Get location information.
unsigned Line = getLineNumber(VD->getLocation());
unsigned Column = getColumnNumber(VD->getLocation());
- const llvm::TargetData &target = CGM.getTargetData();
+ const llvm::DataLayout &target = CGM.getDataLayout();
CharUnits offset = CharUnits::fromQuantity(
target.getStructLayout(blockInfo.StructureType)
@@ -2418,7 +2489,7 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
// offset of __forwarding field
offset = CGM.getContext()
- .toCharUnitsFromBits(target.getPointerSizeInBits());
+ .toCharUnitsFromBits(target.getPointerSizeInBits(0));
addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
@@ -2444,7 +2515,7 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
unsigned ArgNo,
CGBuilderTy &Builder) {
- assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
+ assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
EmitDeclare(VD, llvm::dwarf::DW_TAG_arg_variable, AI, ArgNo, Builder);
}
@@ -2461,7 +2532,7 @@ namespace {
void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
llvm::Value *addr,
CGBuilderTy &Builder) {
- assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
+ assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
ASTContext &C = CGM.getContext();
const BlockDecl *blockDecl = block.getBlockDecl();
@@ -2475,7 +2546,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
getContextDescriptor(cast<Decl>(blockDecl->getDeclContext()));
const llvm::StructLayout *blockLayout =
- CGM.getTargetData().getStructLayout(block.StructureType);
+ CGM.getDataLayout().getStructLayout(block.StructureType);
SmallVector<llvm::Value*, 16> fields;
fields.push_back(createFieldType("__isa", C.VoidPtrTy, 0, loc, AS_public,
@@ -2606,7 +2677,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
/// EmitGlobalVariable - Emit information about a global variable.
void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
const VarDecl *D) {
- assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
+ assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
// Create global variable debug descriptor.
llvm::DIFile Unit = getOrCreateFile(D->getLocation());
unsigned LineNo = getLineNumber(D->getLocation());
@@ -2640,7 +2711,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
/// EmitGlobalVariable - Emit information about an objective-c interface.
void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
ObjCInterfaceDecl *ID) {
- assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
+ assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
// Create global variable debug descriptor.
llvm::DIFile Unit = getOrCreateFile(ID->getLocation());
unsigned LineNo = getLineNumber(ID->getLocation());
@@ -2666,7 +2737,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
/// EmitGlobalVariable - Emit global variable's debug info.
void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
llvm::Constant *Init) {
- assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
+ assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
// Create the descriptor for the variable.
llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
StringRef Name = VD->getName();
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index 44cc49a..2e88a73 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -50,6 +50,9 @@ class CGDebugInfo {
llvm::DICompileUnit TheCU;
SourceLocation CurLoc, PrevLoc;
llvm::DIType VTablePtrType;
+ llvm::DIType ClassTy;
+ llvm::DIType ObjTy;
+ llvm::DIType SelTy;
/// TypeCache - Cache of previously constructed Types.
llvm::DenseMap<void *, llvm::WeakVH> TypeCache;
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index be6638e..8870587 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -24,7 +24,7 @@
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Type.h"
using namespace clang;
using namespace CodeGen;
@@ -121,7 +121,7 @@ void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
// uniqued. We can't do this in C, though, because there's no
// standard way to agree on which variables are the same (i.e.
// there's no mangling).
- if (getContext().getLangOpts().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
if (llvm::GlobalValue::isWeakForLinker(CurFn->getLinkage()))
Linkage = CurFn->getLinkage();
@@ -141,7 +141,7 @@ void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
const char *Separator) {
CodeGenModule &CGM = CGF.CGM;
- if (CGF.getContext().getLangOpts().CPlusPlus) {
+ if (CGF.getLangOpts().CPlusPlus) {
StringRef Name = CGM.getMangledName(&D);
return Name.str();
}
@@ -184,12 +184,14 @@ CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
Name = GetStaticDeclName(*this, D, Separator);
llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
+ unsigned AddrSpace =
+ CGM.GetGlobalVarAddressSpace(&D, CGM.getContext().getTargetAddressSpace(Ty));
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), LTy,
Ty.isConstant(getContext()), Linkage,
CGM.EmitNullConstant(D.getType()), Name, 0,
llvm::GlobalVariable::NotThreadLocal,
- CGM.getContext().getTargetAddressSpace(Ty));
+ AddrSpace);
GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
if (Linkage != llvm::GlobalValue::InternalLinkage)
GV->setVisibility(CurFn->getVisibility());
@@ -220,7 +222,7 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
// If constant emission failed, then this should be a C++ static
// initializer.
if (!Init) {
- if (!getContext().getLangOpts().CPlusPlus)
+ if (!getLangOpts().CPlusPlus)
CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
else if (Builder.GetInsertBlock()) {
// Since we have a static initializer, this global variable can't
@@ -331,7 +333,7 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
// Emit global variable debug descriptor for static vars.
CGDebugInfo *DI = getDebugInfo();
if (DI &&
- CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo) {
DI->setLocation(D.getLocation());
DI->EmitGlobalVariable(var, &D);
}
@@ -704,9 +706,8 @@ static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
/// stores that would be required.
static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
bool isVolatile, CGBuilderTy &Builder) {
- // Zero doesn't require a store.
- if (Init->isNullValue() || isa<llvm::UndefValue>(Init))
- return;
+ assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
+ "called emitStoresForInitAfterMemset for zero or undef value.");
if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
@@ -719,10 +720,11 @@ static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
dyn_cast<llvm::ConstantDataSequential>(Init)) {
for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
llvm::Constant *Elt = CDS->getElementAsConstant(i);
-
- // Get a pointer to the element and emit it.
- emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
- isVolatile, Builder);
+
+ // If necessary, get a pointer to the element and emit it.
+ if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
+ emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
+ isVolatile, Builder);
}
return;
}
@@ -732,9 +734,11 @@ static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
- // Get a pointer to the element and emit it.
- emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
- isVolatile, Builder);
+
+ // If necessary, get a pointer to the element and emit it.
+ if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
+ emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
+ isVolatile, Builder);
}
}
@@ -791,7 +795,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
llvm::Value *DeclPtr;
if (Ty->isConstantSizeType()) {
if (!Target.useGlobalsForAutomaticVariables()) {
- bool NRVO = getContext().getLangOpts().ElideConstructors &&
+ bool NRVO = getLangOpts().ElideConstructors &&
D.isNRVOVariable();
// If this value is a POD array or struct with a statically
@@ -910,7 +914,8 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// Emit debug info for local var declaration.
if (HaveInsertPoint())
if (CGDebugInfo *DI = getDebugInfo()) {
- if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ if (CGM.getCodeGenOpts().getDebugInfo()
+ >= CodeGenOptions::LimitedDebugInfo) {
DI->setLocation(D.getLocation());
if (Target.useGlobalsForAutomaticVariables()) {
DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr),
@@ -1056,10 +1061,11 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
// If the initializer is all or mostly zeros, codegen with memset then do
// a few stores afterward.
if (shouldUseMemSetPlusStoresToInitialize(constant,
- CGM.getTargetData().getTypeAllocSize(constant->getType()))) {
+ CGM.getDataLayout().getTypeAllocSize(constant->getType()))) {
Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
alignment.getQuantity(), isVolatile);
- if (!constant->isNullValue()) {
+ // Zero and undef don't require a store.
+ if (!constant->isNullValue() && !isa<llvm::UndefValue>(constant)) {
Loc = Builder.CreateBitCast(Loc, constant->getType()->getPointerTo());
emitStoresForInitAfterMemset(constant, Loc, isVolatile, Builder);
}
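For context (illustration, not part of this patch): the strategy above lowers a mostly-zero aggregate initializer to a single memset followed by individual stores of only the nonzero elements. A minimal C++ sketch of the effective lowering, with hypothetical names:

    #include <cstring>

    struct S { int a; int pad[6]; int b; };

    void initMostlyZero(S *s) {
      // One memset clears the whole object...
      std::memset(s, 0, sizeof(S));
      // ...then only the nonzero fields are stored. After this patch the
      // zero/undef elements are filtered out by the callers rather than
      // inside emitStoresForInitAfterMemset itself.
      s->a = 1;
      s->b = 2;
    }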
@@ -1493,8 +1499,8 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
LocalDeclMap[&D] = Arg;
if (CGDebugInfo *DI = getDebugInfo()) {
- if (CGM.getCodeGenOpts().DebugInfo >=
- CodeGenOptions::LimitedDebugInfo) {
+ if (CGM.getCodeGenOpts().getDebugInfo()
+ >= CodeGenOptions::LimitedDebugInfo) {
DI->setLocation(D.getLocation());
DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, Arg, Builder);
}
@@ -1576,7 +1582,8 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
// Emit debug info for param declaration.
if (CGDebugInfo *DI = getDebugInfo()) {
- if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ if (CGM.getCodeGenOpts().getDebugInfo()
+ >= CodeGenOptions::LimitedDebugInfo) {
DI->EmitDeclareOfArgVariable(&D, DeclPtr, ArgNo, Builder);
}
}
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 492b95a..65be3c1 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -16,6 +16,7 @@
#include "CGCXXABI.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
+#include "llvm/ADT/StringExtras.h"
using namespace clang;
using namespace CodeGen;
@@ -163,6 +164,9 @@ static llvm::Constant *createAtExitStub(CodeGenModule &CGM,
CodeGenFunction CGF(CGM);
+ // Initialize debug info if needed.
+ CGF.maybeInitializeDebugInfo();
+
CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, fn,
CGM.getTypes().arrangeNullaryFunction(),
FunctionArgList(), SourceLocation());
@@ -218,7 +222,7 @@ CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
llvm::Function *Fn =
llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
Name, &CGM.getModule());
- if (!CGM.getContext().getLangOpts().AppleKext) {
+ if (!CGM.getLangOpts().AppleKext) {
// Set the section if needed.
if (const char *Section =
CGM.getContext().getTargetInfo().getStaticInitSectionSpecifier())
@@ -228,8 +232,8 @@ CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
if (!CGM.getLangOpts().Exceptions)
Fn->setDoesNotThrow();
- if (CGM.getLangOpts().AddressSanitizer)
- Fn->addFnAttr(llvm::Attribute::AddressSafety);
+ if (CGM.getLangOpts().SanitizeAddress)
+ Fn->addFnAttr(llvm::Attributes::AddressSafety);
return Fn;
}
@@ -252,8 +256,7 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
OrderGlobalInits Key(order, PrioritizedCXXGlobalInits.size());
PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
DelayedCXXInitPosition.erase(D);
- }
- else {
+ } else {
llvm::DenseMap<const Decl *, unsigned>::iterator I =
DelayedCXXInitPosition.find(D);
if (I == DelayedCXXInitPosition.end()) {
@@ -276,28 +279,50 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
- // Create our global initialization function.
- llvm::Function *Fn =
- CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__I_a");
+ // Create our global initialization function.
if (!PrioritizedCXXGlobalInits.empty()) {
SmallVector<llvm::Constant*, 8> LocalCXXGlobalInits;
llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
- PrioritizedCXXGlobalInits.end());
- for (unsigned i = 0; i < PrioritizedCXXGlobalInits.size(); i++) {
- llvm::Function *Fn = PrioritizedCXXGlobalInits[i].second;
- LocalCXXGlobalInits.push_back(Fn);
- }
- LocalCXXGlobalInits.append(CXXGlobalInits.begin(), CXXGlobalInits.end());
- CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
+ PrioritizedCXXGlobalInits.end());
+ // Iterate over "chunks" of ctors with the same priority and emit each
+ // chunk into a separate function. Everything is sorted first by priority
+ // and second by lexical order, so the ctor functions are emitted in order.
+ for (SmallVectorImpl<GlobalInitData >::iterator
+ I = PrioritizedCXXGlobalInits.begin(),
+ E = PrioritizedCXXGlobalInits.end(); I != E; ) {
+ SmallVectorImpl<GlobalInitData >::iterator
+ PrioE = std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());
+
+ LocalCXXGlobalInits.clear();
+ unsigned Priority = I->first.priority;
+ // Compute the function suffix from the priority. Prepend zeroes so that
+ // the function names sort in the same order as the priorities.
+ std::string PrioritySuffix = llvm::utostr(Priority);
+ // Priority is always <= 65535 (enforced by Sema).
+ PrioritySuffix = std::string(6-PrioritySuffix.size(), '0')+PrioritySuffix;
+ llvm::Function *Fn =
+ CreateGlobalInitOrDestructFunction(*this, FTy,
+ "_GLOBAL__I_" + PrioritySuffix);
+
+ for (; I < PrioE; ++I)
+ LocalCXXGlobalInits.push_back(I->second);
+
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
&LocalCXXGlobalInits[0],
LocalCXXGlobalInits.size());
+ AddGlobalCtor(Fn, Priority);
+ }
}
- else
- CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
- &CXXGlobalInits[0],
- CXXGlobalInits.size());
+
+ llvm::Function *Fn =
+ CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__I_a");
+
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
+ &CXXGlobalInits[0],
+ CXXGlobalInits.size());
AddGlobalCtor(Fn);
+
CXXGlobalInits.clear();
PrioritizedCXXGlobalInits.clear();
}
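A minimal sketch (not part of this patch) of the suffix computation used for "_GLOBAL__I_<suffix>": zero-padding to six digits makes the function names sort in the same order as the numeric priorities.

    #include <cassert>
    #include <string>

    std::string makePrioritySuffix(unsigned Priority) {
      assert(Priority <= 65535 && "bound enforced by Sema");
      std::string S = std::to_string(Priority); // stands in for llvm::utostr
      return std::string(6 - S.size(), '0') + S; // e.g. 101 -> "000101"
    }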
@@ -321,8 +346,9 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
const VarDecl *D,
llvm::GlobalVariable *Addr,
bool PerformInit) {
- if (CGM.getModuleDebugInfo() && !D->hasAttr<NoDebugAttr>())
- DebugInfo = CGM.getModuleDebugInfo();
+ // Check if we need to emit debug info for the variable initializer.
+ if (!D->hasAttr<NoDebugAttr>())
+ maybeInitializeDebugInfo();
StartFunction(GlobalDecl(D), getContext().VoidTy, Fn,
getTypes().arrangeNullaryFunction(),
@@ -344,6 +370,9 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
llvm::Constant **Decls,
unsigned NumDecls) {
+ // Initialize debug info if needed.
+ maybeInitializeDebugInfo();
+
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
getTypes().arrangeNullaryFunction(),
FunctionArgList(), SourceLocation());
@@ -369,6 +398,9 @@ void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
void CodeGenFunction::GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
const std::vector<std::pair<llvm::WeakVH, llvm::Constant*> >
&DtorsAndObjects) {
+ // Initialize debug info if needed.
+ maybeInitializeDebugInfo();
+
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
getTypes().arrangeNullaryFunction(),
FunctionArgList(), SourceLocation());
@@ -405,6 +437,9 @@ CodeGenFunction::generateDestroyHelper(llvm::Constant *addr,
llvm::Function *fn =
CreateGlobalInitOrDestructFunction(CGM, FTy, "__cxx_global_array_dtor");
+ // Initialize debug info if needed.
+ maybeInitializeDebugInfo();
+
StartFunction(GlobalDecl(), getContext().VoidTy, fn, FI, args,
SourceLocation());
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index ba9c296..86dee5a 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -307,14 +307,15 @@ static bool PersonalityHasOnlyCXXUses(llvm::Constant *Fn) {
/// aggressive about only using the ObjC++ personality in a function
/// when it really needs it.
void CodeGenModule::SimplifyPersonality() {
- // For now, this is really a Darwin-specific operation.
- if (!Context.getTargetInfo().getTriple().isOSDarwin())
- return;
-
// If we're not in ObjC++ -fexceptions, there's nothing to do.
if (!LangOpts.CPlusPlus || !LangOpts.ObjC1 || !LangOpts.Exceptions)
return;
+ // Both the problem this endeavors to fix and the way the logic
+ // above works are specific to the NeXT runtime.
+ if (!LangOpts.ObjCRuntime.isNeXTFamily())
+ return;
+
const EHPersonality &ObjCXX = EHPersonality::get(LangOpts);
const EHPersonality &CXX = getCXXPersonality(LangOpts);
if (&ObjCXX == &CXX)
@@ -534,7 +535,7 @@ static void emitFilterDispatchBlock(CodeGenFunction &CGF,
llvm::Value *zero = CGF.Builder.getInt32(0);
llvm::Value *failsFilter =
CGF.Builder.CreateICmpSLT(selector, zero, "ehspec.fails");
- CGF.Builder.CreateCondBr(failsFilter, unexpectedBB, CGF.getEHResumeBlock());
+ CGF.Builder.CreateCondBr(failsFilter, unexpectedBB, CGF.getEHResumeBlock(false));
CGF.EmitBlock(unexpectedBB);
}
@@ -614,7 +615,7 @@ CodeGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si) {
// The dispatch block for the end of the scope chain is a block that
// just resumes unwinding.
if (si == EHStack.stable_end())
- return getEHResumeBlock();
+ return getEHResumeBlock(true);
// Otherwise, we should look at the actual scope.
EHScope &scope = *EHStack.find(si);
@@ -1546,7 +1547,7 @@ llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
return TerminateHandler;
}
-llvm::BasicBlock *CodeGenFunction::getEHResumeBlock() {
+llvm::BasicBlock *CodeGenFunction::getEHResumeBlock(bool isCleanup) {
if (EHResumeBlock) return EHResumeBlock;
CGBuilderTy::InsertPoint SavedIP = Builder.saveIP();
@@ -1560,7 +1561,7 @@ llvm::BasicBlock *CodeGenFunction::getEHResumeBlock() {
// This can always be a call because we necessarily didn't find
// anything on the EH stack which needs our help.
const char *RethrowName = Personality.CatchallRethrowFn;
- if (RethrowName != 0) {
+ if (RethrowName != 0 && !isCleanup) {
Builder.CreateCall(getCatchallRethrowFn(*this, RethrowName),
getExceptionFromSlot())
->setDoesNotReturn();
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 1fe4c18..63cc5b5 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -26,7 +26,8 @@
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/MDBuilder.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
+#include "llvm/ADT/Hashing.h"
using namespace clang;
using namespace CodeGen;
@@ -156,50 +157,6 @@ void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
}
}
-namespace {
-/// \brief An adjustment to be made to the temporary created when emitting a
-/// reference binding, which accesses a particular subobject of that temporary.
- struct SubobjectAdjustment {
- enum {
- DerivedToBaseAdjustment,
- FieldAdjustment,
- MemberPointerAdjustment
- } Kind;
-
- union {
- struct {
- const CastExpr *BasePath;
- const CXXRecordDecl *DerivedClass;
- } DerivedToBase;
-
- FieldDecl *Field;
-
- struct {
- const MemberPointerType *MPT;
- llvm::Value *Ptr;
- } Ptr;
- };
-
- SubobjectAdjustment(const CastExpr *BasePath,
- const CXXRecordDecl *DerivedClass)
- : Kind(DerivedToBaseAdjustment) {
- DerivedToBase.BasePath = BasePath;
- DerivedToBase.DerivedClass = DerivedClass;
- }
-
- SubobjectAdjustment(FieldDecl *Field)
- : Kind(FieldAdjustment) {
- this->Field = Field;
- }
-
- SubobjectAdjustment(const MemberPointerType *MPT, llvm::Value *Ptr)
- : Kind(MemberPointerAdjustment) {
- this->Ptr.MPT = MPT;
- this->Ptr.Ptr = Ptr;
- }
- };
-}
-
static llvm::Value *
CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
const NamedDecl *InitializedDecl) {
@@ -232,32 +189,18 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
const CXXDestructorDecl *&ReferenceTemporaryDtor,
QualType &ObjCARCReferenceLifetimeType,
const NamedDecl *InitializedDecl) {
- // Look through single-element init lists that claim to be lvalues. They're
- // just syntactic wrappers in this case.
- if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) {
- if (ILE->getNumInits() == 1 && ILE->isGLValue())
- E = ILE->getInit(0);
- }
-
- // Look through expressions for materialized temporaries (for now).
- if (const MaterializeTemporaryExpr *M
- = dyn_cast<MaterializeTemporaryExpr>(E)) {
- // Objective-C++ ARC:
- // If we are binding a reference to a temporary that has ownership, we
- // need to perform retain/release operations on the temporary.
- if (CGF.getContext().getLangOpts().ObjCAutoRefCount &&
- E->getType()->isObjCLifetimeType() &&
- (E->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
- E->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
- E->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
- ObjCARCReferenceLifetimeType = E->getType();
-
- E = M->GetTemporaryExpr();
- }
+ const MaterializeTemporaryExpr *M = NULL;
+ E = E->findMaterializedTemporary(M);
+ // Objective-C++ ARC:
+ // If we are binding a reference to a temporary that has ownership, we
+ // need to perform retain/release operations on the temporary.
+ if (M && CGF.getLangOpts().ObjCAutoRefCount &&
+ M->getType()->isObjCLifetimeType() &&
+ (M->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
+ M->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
+ M->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
+ ObjCARCReferenceLifetimeType = M->getType();
- if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
- E = DAE->getExpr();
-
if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
CGF.enterFullExpression(EWC);
CodeGenFunction::RunCleanupsScope Scope(CGF);
@@ -335,54 +278,13 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
return ReferenceTemporary;
}
-
- SmallVector<SubobjectAdjustment, 2> Adjustments;
- while (true) {
- E = E->IgnoreParens();
-
- if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
- if ((CE->getCastKind() == CK_DerivedToBase ||
- CE->getCastKind() == CK_UncheckedDerivedToBase) &&
- E->getType()->isRecordType()) {
- E = CE->getSubExpr();
- CXXRecordDecl *Derived
- = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
- Adjustments.push_back(SubobjectAdjustment(CE, Derived));
- continue;
- }
-
- if (CE->getCastKind() == CK_NoOp) {
- E = CE->getSubExpr();
- continue;
- }
- } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
- if (!ME->isArrow() && ME->getBase()->isRValue()) {
- assert(ME->getBase()->getType()->isRecordType());
- if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
- E = ME->getBase();
- Adjustments.push_back(SubobjectAdjustment(Field));
- continue;
- }
- }
- } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
- if (BO->isPtrMemOp()) {
- assert(BO->getLHS()->isRValue());
- E = BO->getLHS();
- const MemberPointerType *MPT =
- BO->getRHS()->getType()->getAs<MemberPointerType>();
- llvm::Value *Ptr = CGF.EmitScalarExpr(BO->getRHS());
- Adjustments.push_back(SubobjectAdjustment(MPT, Ptr));
- }
- }
- if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
- if (opaque->getType()->isRecordType())
- return CGF.EmitOpaqueValueLValue(opaque).getAddress();
+ SmallVector<SubobjectAdjustment, 2> Adjustments;
+ E = E->skipRValueSubobjectAdjustments(Adjustments);
+ if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
+ if (opaque->getType()->isRecordType())
+ return CGF.EmitOpaqueValueLValue(opaque).getAddress();
- // Nothing changed.
- break;
- }
-
// Create a reference temporary if necessary.
AggValueSlot AggSlot = AggValueSlot::ignored();
if (CGF.hasAggregateLLVMType(E->getType()) &&
@@ -446,8 +348,9 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
}
case SubobjectAdjustment::MemberPointerAdjustment: {
+ llvm::Value *Ptr = CGF.EmitScalarExpr(Adjustment.Ptr.RHS);
Object = CGF.CGM.getCXXABI().EmitMemberDataPointerAddress(
- CGF, Object, Adjustment.Ptr.Ptr, Adjustment.Ptr.MPT);
+ CGF, Object, Ptr, Adjustment.Ptr.MPT);
break;
}
}
@@ -486,6 +389,15 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
ReferenceTemporaryDtor,
ObjCARCReferenceLifetimeType,
InitializedDecl);
+ if (SanitizePerformTypeCheck && !E->getType()->isFunctionType()) {
+ // C++11 [dcl.ref]p5 (as amended by core issue 453):
+ // If a glvalue to which a reference is directly bound designates neither
+ // an existing object or function of an appropriate type nor a region of
+ // storage of suitable size and alignment to contain an object of the
+ // reference's type, the behavior is undefined.
+ QualType Ty = E->getType();
+ EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
+ }
if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
return RValue::get(Value);
@@ -549,22 +461,133 @@ unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
->getZExtValue();
}
-void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
- if (!CatchUndefined)
+/// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
+static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
+ llvm::Value *High) {
+ llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
+ llvm::Value *K47 = Builder.getInt64(47);
+ llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
+ llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
+ llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
+ llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
+ return Builder.CreateMul(B1, KMul);
+}
+
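For reference (not part of this patch), the scalar C++ equivalent of the IR emitted above, mirroring hash_16_bytes from include/llvm/ADT/Hashing.h:

    #include <cstdint>

    uint64_t hash16Bytes(uint64_t Low, uint64_t High) {
      const uint64_t KMul = 0x9ddfea08eb382d69ULL; // Murmur-inspired constant
      uint64_t A = (Low ^ High) * KMul;
      A ^= (A >> 47);
      uint64_t B = (High ^ A) * KMul;
      B ^= (B >> 47);
      return B * KMul;
    }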
+void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
+ llvm::Value *Address,
+ QualType Ty, CharUnits Alignment) {
+ if (!SanitizePerformTypeCheck)
return;
- // This needs to be to the standard address space.
- Address = Builder.CreateBitCast(Address, Int8PtrTy);
+ // Don't check pointers outside the default address space. The null check
+ // isn't correct, the object-size check isn't supported by LLVM, and we can't
+ // communicate the addresses to the runtime handler for the vptr check.
+ if (Address->getType()->getPointerAddressSpace())
+ return;
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);
+ llvm::Value *Cond = 0;
- llvm::Value *Min = Builder.getFalse();
- llvm::Value *C = Builder.CreateCall2(F, Address, Min);
- llvm::BasicBlock *Cont = createBasicBlock();
- Builder.CreateCondBr(Builder.CreateICmpUGE(C,
- llvm::ConstantInt::get(IntPtrTy, Size)),
- Cont, getTrapBB());
- EmitBlock(Cont);
+ if (getLangOpts().SanitizeNull) {
+ // The glvalue must not be an empty glvalue.
+ Cond = Builder.CreateICmpNE(
+ Address, llvm::Constant::getNullValue(Address->getType()));
+ }
+
+ if (getLangOpts().SanitizeObjectSize && !Ty->isIncompleteType()) {
+ uint64_t Size = getContext().getTypeSizeInChars(Ty).getQuantity();
+
+ // The glvalue must refer to a large enough storage region.
+ // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
+ // to check this.
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);
+ llvm::Value *Min = Builder.getFalse();
+ llvm::Value *CastAddr = Builder.CreateBitCast(Address, Int8PtrTy);
+ llvm::Value *LargeEnough =
+ Builder.CreateICmpUGE(Builder.CreateCall2(F, CastAddr, Min),
+ llvm::ConstantInt::get(IntPtrTy, Size));
+ Cond = Cond ? Builder.CreateAnd(Cond, LargeEnough) : LargeEnough;
+ }
+
+ uint64_t AlignVal = 0;
+
+ if (getLangOpts().SanitizeAlignment) {
+ AlignVal = Alignment.getQuantity();
+ if (!Ty->isIncompleteType() && !AlignVal)
+ AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();
+
+ // The glvalue must be suitably aligned.
+ if (AlignVal) {
+ llvm::Value *Align =
+ Builder.CreateAnd(Builder.CreatePtrToInt(Address, IntPtrTy),
+ llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
+ llvm::Value *Aligned =
+ Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
+ Cond = Cond ? Builder.CreateAnd(Cond, Aligned) : Aligned;
+ }
+ }
+
+ if (Cond) {
+ llvm::Constant *StaticData[] = {
+ EmitCheckSourceLocation(Loc),
+ EmitCheckTypeDescriptor(Ty),
+ llvm::ConstantInt::get(SizeTy, AlignVal),
+ llvm::ConstantInt::get(Int8Ty, TCK)
+ };
+ EmitCheck(Cond, "type_mismatch", StaticData, Address);
+ }
+
+ // If possible, check that the vptr indicates that there is a subobject of
+ // type Ty at offset zero within this object.
+ CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+ if (getLangOpts().SanitizeVptr && TCK != TCK_ConstructorCall &&
+ RD && RD->hasDefinition() && RD->isDynamicClass()) {
+ // Compute a hash of the mangled name of the type.
+ //
+ // FIXME: This is not guaranteed to be deterministic! Move to a
+ // fingerprinting mechanism once LLVM provides one. For the time
+ // being the implementation happens to be deterministic.
+ llvm::SmallString<64> MangledName;
+ llvm::raw_svector_ostream Out(MangledName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
+ Out);
+ llvm::hash_code TypeHash = hash_value(Out.str());
+
+ // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
+ llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
+ llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
+ llvm::Value *VPtrAddr = Builder.CreateBitCast(Address, VPtrTy);
+ llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
+ llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);
+
+ llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
+ Hash = Builder.CreateTrunc(Hash, IntPtrTy);
+
+ // Look the hash up in our cache.
+ const int CacheSize = 128;
+ llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
+ llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
+ "__ubsan_vptr_type_cache");
+ llvm::Value *Slot = Builder.CreateAnd(Hash,
+ llvm::ConstantInt::get(IntPtrTy,
+ CacheSize-1));
+ llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
+ llvm::Value *CacheVal =
+ Builder.CreateLoad(Builder.CreateInBoundsGEP(Cache, Indices));
+
+ // If the hash isn't in the cache, call a runtime handler to perform the
+ // hard work of checking whether the vptr is for an object of the right
+ // type. This will either fill in the cache and return, or produce a
+ // diagnostic.
+ llvm::Constant *StaticData[] = {
+ EmitCheckSourceLocation(Loc),
+ EmitCheckTypeDescriptor(Ty),
+ CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
+ llvm::ConstantInt::get(Int8Ty, TCK)
+ };
+ llvm::Value *DynamicData[] = { Address, Hash };
+ EmitCheck(Builder.CreateICmpEQ(CacheVal, Hash),
+ "dynamic_type_cache_miss", StaticData, DynamicData, true);
+ }
}
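A rough scalar model (assumptions noted in comments, not part of this patch) of the cache probe the code above emits as IR:

    #include <cstdint>

    extern "C" uintptr_t __ubsan_vptr_type_cache[128]; // provided by the runtime

    bool vptrProbablyMatches(const void *Object, uint64_t TypeHash) {
      // Load the vptr and combine it with the hash of the mangled type name.
      uintptr_t VPtr = *reinterpret_cast<const uintptr_t *>(Object);
      uintptr_t Hash = (uintptr_t)hash16Bytes(TypeHash, VPtr); // sketch above
      // On a cache miss, codegen calls __ubsan_handle_dynamic_type_cache_miss,
      // which either fills the cache and returns or produces a diagnostic.
      return __ubsan_vptr_type_cache[Hash & (128 - 1)] == Hash;
    }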
@@ -641,11 +664,11 @@ LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
}
-LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
+LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
LValue LV = EmitLValue(E);
if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
- EmitCheck(LV.getAddress(),
- getContext().getTypeSizeInChars(E->getType()).getQuantity());
+ EmitTypeCheck(TCK, E->getExprLoc(), LV.getAddress(),
+ E->getType(), LV.getAlignment());
return LV;
}
@@ -672,7 +695,7 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
llvm_unreachable("cannot emit a property reference directly");
case Expr::ObjCSelectorExprClass:
- return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
+ return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
case Expr::ObjCIsaExprClass:
return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
case Expr::BinaryOperatorClass:
@@ -709,6 +732,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
case Expr::CXXBindTemporaryExprClass:
return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
+ case Expr::CXXUuidofExprClass:
+ return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
case Expr::LambdaExprClass:
return EmitLambdaLValue(cast<LambdaExpr>(E));
@@ -1124,7 +1149,7 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
// Get the output type.
llvm::Type *ResLTy = ConvertType(LV.getType());
- unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
+ unsigned ResSizeInBits = CGM.getDataLayout().getTypeSizeInBits(ResLTy);
// Compute the result as an OR of all of the individual component accesses.
llvm::Value *Res = 0;
@@ -1322,7 +1347,7 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Get the output type.
llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
- unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
+ unsigned ResSizeInBits = CGM.getDataLayout().getTypeSizeInBits(ResLTy);
// Get the source value, truncated to the width of the bit-field.
llvm::Value *SrcVal = Src.getScalarVal();
@@ -1645,6 +1670,21 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
CharUnits Alignment = getContext().getDeclAlign(ND);
QualType T = E->getType();
+ // A DeclRefExpr for a reference initialized by a constant expression can
+ // appear without being odr-used. Directly emit the constant initializer.
+ if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
+ const Expr *Init = VD->getAnyInitializer(VD);
+ if (Init && !isa<ParmVarDecl>(VD) && VD->getType()->isReferenceType() &&
+ VD->isUsableInConstantExpressions(getContext()) &&
+ VD->checkInitIsICE()) {
+ llvm::Constant *Val =
+ CGM.EmitConstantValue(*VD->evaluateValue(), VD->getType(), this);
+ assert(Val && "failed to emit reference constant expression");
+ // FIXME: Eventually we will want to emit vector element references.
+ return MakeAddrLValue(Val, T, Alignment);
+ }
+ }
+
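Illustration (not part of this patch) of the case this handles: a reference initialized by a constant expression can be named without being odr-used, so its constant initializer can be emitted directly.

    constexpr int N = 42;
    const int &R = N;           // reference bound via a constant expression
    static_assert(R == 42, ""); // uses R's value; no load through R is needed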
// FIXME: We should be able to assert this for FunctionDecls as well!
// FIXME: We should be able to assert this for all DeclRefExprs, not just
// those with a valid source location.
@@ -1655,7 +1695,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
if (ND->hasAttr<WeakRefAttr>()) {
const ValueDecl *VD = cast<ValueDecl>(ND);
llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
- return MakeAddrLValue(Aliasee, E->getType(), Alignment);
+ return MakeAddrLValue(Aliasee, T, Alignment);
}
if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
@@ -1683,9 +1723,8 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
}
assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal());
- CharUnits alignment = getContext().getDeclAlign(VD);
return MakeAddrLValue(GetAddrOfBlockDecl(VD, isBlockVariable),
- E->getType(), alignment);
+ T, Alignment);
}
assert(V && "DeclRefExpr not entered in LocalDeclMap?");
@@ -1736,8 +1775,8 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
// of a pointer to object; as in void foo (__weak id *param); *param = 0;
// But, we continue to generate __strong write barrier on indirect write
// into a pointer to object.
- if (getContext().getLangOpts().ObjC1 &&
- getContext().getLangOpts().getGC() != LangOptions::NonGC &&
+ if (getLangOpts().ObjC1 &&
+ getLangOpts().getGC() != LangOptions::NonGC &&
LV.isObjCWeak())
LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
return LV;
@@ -1815,8 +1854,9 @@ GetAddrOfConstantWideString(StringRef Str,
static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source,
SmallString<32>& Target) {
Target.resize(CharByteWidth * (Source.size() + 1));
- char* ResultPtr = &Target[0];
- bool success = ConvertUTF8toWide(CharByteWidth, Source, ResultPtr);
+ char *ResultPtr = &Target[0];
+ const UTF8 *ErrorPtr;
+ bool success = ConvertUTF8toWide(CharByteWidth, Source, ResultPtr, ErrorPtr);
(void)success;
assert(success);
Target.resize(ResultPtr - &Target[0]);
@@ -1888,33 +1928,167 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
}
}
-llvm::BasicBlock *CodeGenFunction::getTrapBB() {
- const CodeGenOptions &GCO = CGM.getCodeGenOpts();
+/// Emit a type descriptor suitable for use by a runtime sanitizer library. The
+/// format of a type descriptor is
+///
+/// \code
+/// { i16 TypeKind, i16 TypeInfo }
+/// \endcode
+///
+/// followed by an array of i8 containing the type name. TypeKind is 0 for an
+/// integer, 1 for a floating point value, and -1 for anything else.
+llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
+ // FIXME: Only emit each type's descriptor once.
+ uint16_t TypeKind = -1;
+ uint16_t TypeInfo = 0;
+
+ if (T->isIntegerType()) {
+ TypeKind = 0;
+ TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
+ T->isSignedIntegerType();
+ } else if (T->isFloatingType()) {
+ TypeKind = 1;
+ TypeInfo = getContext().getTypeSize(T);
+ }
+
+ // Format the type name as if for a diagnostic, including quotes and
+ // optionally an 'aka'.
+ llvm::SmallString<32> Buffer;
+ CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
+ (intptr_t)T.getAsOpaquePtr(),
+ 0, 0, 0, 0, 0, 0, Buffer,
+ ArrayRef<intptr_t>());
+
+ llvm::Constant *Components[] = {
+ Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
+ llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
+ };
+ llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Descriptor->getType(),
+ /*isConstant=*/true,
+ llvm::GlobalVariable::PrivateLinkage,
+ Descriptor);
+ GV->setUnnamedAddr(true);
+ return GV;
+}
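Worked example (not part of this patch): for a 32-bit signed 'int', TypeKind is 0 and TypeInfo is (log2(32) << 1) | 1 = (5 << 1) | 1 = 11, so the emitted descriptor is roughly { i16 0, i16 11 } followed by the name bytes "'int'".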
- // If we are not optimzing, don't collapse all calls to trap in the function
- // to the same call, that way, in the debugger they can see which operation
- // did in fact fail. If we are optimizing, we collapse all calls to trap down
- // to just one per function to save on codesize.
- if (GCO.OptimizationLevel && TrapBB)
- return TrapBB;
+llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
+ llvm::Type *TargetTy = IntPtrTy;
+
+ // Integers which fit in intptr_t are zero-extended and passed directly.
+ if (V->getType()->isIntegerTy() &&
+ V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
+ return Builder.CreateZExt(V, TargetTy);
+
+ // Pointers are passed directly, everything else is passed by address.
+ if (!V->getType()->isPointerTy()) {
+ llvm::Value *Ptr = Builder.CreateAlloca(V->getType());
+ Builder.CreateStore(V, Ptr);
+ V = Ptr;
+ }
+ return Builder.CreatePtrToInt(V, TargetTy);
+}
+
+/// \brief Emit a representation of a SourceLocation for passing to a handler
+/// in a sanitizer runtime library. The format for this data is:
+/// \code
+/// struct SourceLocation {
+/// const char *Filename;
+/// int32_t Line, Column;
+/// };
+/// \endcode
+/// For an invalid SourceLocation, the Filename pointer is null.
+llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
+ PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
+
+ llvm::Constant *Data[] = {
+ // FIXME: Only emit each file name once.
+ PLoc.isValid() ? cast<llvm::Constant>(
+ Builder.CreateGlobalStringPtr(PLoc.getFilename()))
+ : llvm::Constant::getNullValue(Int8PtrTy),
+ Builder.getInt32(PLoc.getLine()),
+ Builder.getInt32(PLoc.getColumn())
+ };
- llvm::BasicBlock *Cont = 0;
- if (HaveInsertPoint()) {
- Cont = createBasicBlock("cont");
- EmitBranch(Cont);
+ return llvm::ConstantStruct::getAnon(Data);
+}
+
+void CodeGenFunction::EmitCheck(llvm::Value *Checked, StringRef CheckName,
+ llvm::ArrayRef<llvm::Constant *> StaticArgs,
+ llvm::ArrayRef<llvm::Value *> DynamicArgs,
+ bool Recoverable) {
+ llvm::BasicBlock *Cont = createBasicBlock("cont");
+
+ llvm::BasicBlock *Handler = createBasicBlock("handler." + CheckName);
+ Builder.CreateCondBr(Checked, Cont, Handler);
+ EmitBlock(Handler);
+
+ llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
+ llvm::GlobalValue *InfoPtr =
+ new llvm::GlobalVariable(CGM.getModule(), Info->getType(), true,
+ llvm::GlobalVariable::PrivateLinkage, Info);
+ InfoPtr->setUnnamedAddr(true);
+
+ llvm::SmallVector<llvm::Value *, 4> Args;
+ llvm::SmallVector<llvm::Type *, 4> ArgTypes;
+ Args.reserve(DynamicArgs.size() + 1);
+ ArgTypes.reserve(DynamicArgs.size() + 1);
+
+ // Handler functions take an i8* pointing to the (handler-specific) static
+ // information block, followed by a sequence of intptr_t arguments
+ // representing operand values.
+ Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy));
+ ArgTypes.push_back(Int8PtrTy);
+ for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
+ Args.push_back(EmitCheckValue(DynamicArgs[i]));
+ ArgTypes.push_back(IntPtrTy);
+ }
+
+ llvm::FunctionType *FnType =
+ llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
+ llvm::AttrBuilder B;
+ if (!Recoverable) {
+ B.addAttribute(llvm::Attributes::NoReturn)
+ .addAttribute(llvm::Attributes::NoUnwind);
+ }
+ B.addAttribute(llvm::Attributes::UWTable);
+ llvm::Value *Fn = CGM.CreateRuntimeFunction(FnType,
+ ("__ubsan_handle_" + CheckName).str(),
+ llvm::Attributes::get(getLLVMContext(),
+ B));
+ llvm::CallInst *HandlerCall = Builder.CreateCall(Fn, Args);
+ if (Recoverable) {
+ Builder.CreateBr(Cont);
+ } else {
+ HandlerCall->setDoesNotReturn();
+ HandlerCall->setDoesNotThrow();
+ Builder.CreateUnreachable();
}
- TrapBB = createBasicBlock("trap");
- EmitBlock(TrapBB);
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
- llvm::CallInst *TrapCall = Builder.CreateCall(F);
- TrapCall->setDoesNotReturn();
- TrapCall->setDoesNotThrow();
- Builder.CreateUnreachable();
+ EmitBlock(Cont);
+}
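Sketch (an assumption about the runtime side, not taken from this patch) of what a matching handler's C signature looks like under the convention above: one pointer to the static data block, then one uintptr_t per operand value.

    #include <cstdint>

    struct TypeMismatchData; // handler-specific static information block

    extern "C" void __ubsan_handle_type_mismatch(TypeMismatchData *Data,
                                                 uintptr_t Pointer);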
- if (Cont)
- EmitBlock(Cont);
- return TrapBB;
+void CodeGenFunction::EmitTrapvCheck(llvm::Value *Checked) {
+ llvm::BasicBlock *Cont = createBasicBlock("cont");
+
+ // If we're optimizing, collapse all calls to trap down to just one per
+ // function to save on code size.
+ if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) {
+ TrapBB = createBasicBlock("trap");
+ Builder.CreateCondBr(Checked, Cont, TrapBB);
+ EmitBlock(TrapBB);
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
+ llvm::CallInst *TrapCall = Builder.CreateCall(F);
+ TrapCall->setDoesNotReturn();
+ TrapCall->setDoesNotThrow();
+ Builder.CreateUnreachable();
+ } else {
+ Builder.CreateCondBr(Checked, Cont, TrapBB);
+ }
+
+ EmitBlock(Cont);
}
/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
@@ -2007,14 +2181,14 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// Propagate the alignment from the array itself to the result.
ArrayAlignment = ArrayLV.getAlignment();
- if (getContext().getLangOpts().isSignedOverflowDefined())
+ if (getLangOpts().isSignedOverflowDefined())
Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
else
Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
} else {
// The base must be a pointer, which is not an aggregate. Emit it.
llvm::Value *Base = EmitScalarExpr(E->getBase());
- if (getContext().getLangOpts().isSignedOverflowDefined())
+ if (getLangOpts().isSignedOverflowDefined())
Address = Builder.CreateGEP(Base, Idx, "arrayidx");
else
Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
@@ -2037,8 +2211,8 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
- if (getContext().getLangOpts().ObjC1 &&
- getContext().getLangOpts().getGC() != LangOptions::NonGC) {
+ if (getLangOpts().ObjC1 &&
+ getLangOpts().getGC() != LangOptions::NonGC) {
LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
setObjCGCLValueClass(getContext(), E, LV);
}
@@ -2114,11 +2288,13 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
// If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
LValue BaseLV;
- if (E->isArrow())
- BaseLV = MakeNaturalAlignAddrLValue(EmitScalarExpr(BaseExpr),
- BaseExpr->getType()->getPointeeType());
- else
- BaseLV = EmitLValue(BaseExpr);
+ if (E->isArrow()) {
+ llvm::Value *Ptr = EmitScalarExpr(BaseExpr);
+ QualType PtrTy = BaseExpr->getType()->getPointeeType();
+ EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Ptr, PtrTy);
+ BaseLV = MakeNaturalAlignAddrLValue(Ptr, PtrTy);
+ } else
+ BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
NamedDecl *ND = E->getMemberDecl();
if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
@@ -2355,7 +2531,10 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_Dependent:
llvm_unreachable("dependent cast kind in IR gen!");
-
+
+ case CK_BuiltinFnToFnPtr:
+ llvm_unreachable("builtin functions are handled elsewhere");
+
// These two casts are currently treated as no-ops, although they could
// potentially be real operations depending on the target's ABI.
case CK_NonAtomicToAtomic:
@@ -2546,7 +2725,7 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
if (const CXXPseudoDestructorExpr *PseudoDtor
= dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
QualType DestroyedType = PseudoDtor->getDestroyedType();
- if (getContext().getLangOpts().ObjCAutoRefCount &&
+ if (getLangOpts().ObjCAutoRefCount &&
DestroyedType->isObjCLifetimeType() &&
(DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
@@ -2635,7 +2814,7 @@ LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
}
RValue RV = EmitAnyExpr(E->getRHS());
- LValue LV = EmitLValue(E->getLHS());
+ LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
EmitStoreThroughLValue(RV, LV);
return LV;
}
@@ -2677,6 +2856,14 @@ CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}
+llvm::Value *CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
+ return CGM.GetAddrOfUuidDescriptor(E);
+}
+
+LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
+ return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType());
+}
+
LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
@@ -2977,11 +3164,10 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
uint64_t Size = sizeChars.getQuantity();
CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
unsigned Align = alignChars.getQuantity();
- unsigned MaxInlineWidth =
- getContext().getTargetInfo().getMaxAtomicInlineWidth();
- bool UseLibcall = (Size != Align || Size > MaxInlineWidth);
-
-
+ unsigned MaxInlineWidthInBits =
+ getContext().getTargetInfo().getMaxAtomicInlineWidth();
+ bool UseLibcall = (Size != Align ||
+ getContext().toBits(sizeChars) > MaxInlineWidthInBits);
llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
Ptr = EmitScalarExpr(E->getPtr());
@@ -3177,6 +3363,13 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
return ConvertTempToRValue(*this, E->getType(), Dest);
}
+ bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
+ E->getOp() == AtomicExpr::AO__atomic_store ||
+ E->getOp() == AtomicExpr::AO__atomic_store_n;
+ bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
+ E->getOp() == AtomicExpr::AO__atomic_load ||
+ E->getOp() == AtomicExpr::AO__atomic_load_n;
+
llvm::Type *IPtrTy =
llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
llvm::Value *OrigDest = Dest;
@@ -3194,14 +3387,20 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
break;
case 1: // memory_order_consume
case 2: // memory_order_acquire
+ if (IsStore)
+ break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Acquire);
break;
case 3: // memory_order_release
+ if (IsLoad)
+ break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Release);
break;
case 4: // memory_order_acq_rel
+ if (IsLoad || IsStore)
+ break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::AcquireRelease);
break;
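Illustration (not part of this patch) of the undefined-behavior cases the new guards skip over instead of crashing:

    #include <atomic>

    void demo(std::atomic<int> &X) {
      // C11/C++11 forbid acquire (or consume/acq_rel) orderings on stores
      // and release (or acq_rel) orderings on loads.
      X.store(1, std::memory_order_acquire);    // invalid order for a store
      (void)X.load(std::memory_order_release);  // invalid order for a load
    }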
@@ -3221,13 +3420,6 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
// Long case, when Order isn't obviously constant.
- bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
- E->getOp() == AtomicExpr::AO__atomic_store ||
- E->getOp() == AtomicExpr::AO__atomic_store_n;
- bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
- E->getOp() == AtomicExpr::AO__atomic_load ||
- E->getOp() == AtomicExpr::AO__atomic_load_n;
-
// Create all the relevant BB's
llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
*AcqRelBB = 0, *SeqCstBB = 0;
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 61f7362..718e8f9 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -549,8 +549,10 @@ AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
switch (E->getCastKind()) {
case CK_Dynamic: {
+ // FIXME: Can this actually happen? We have no test coverage for it.
assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
- LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
+ LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
+ CodeGenFunction::TCK_Load);
// FIXME: Do we also need to handle property references here?
if (LV.isSimple())
CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
@@ -645,6 +647,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
case CK_CopyAndAutoreleaseBlockObject:
+ case CK_BuiltinFnToFnPtr:
llvm_unreachable("cast kind invalid for aggregate types");
}
}
@@ -771,7 +774,7 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
Visit(E->getRHS());
// Now emit the LHS and copy into it.
- LValue LHS = CGF.EmitLValue(E->getLHS());
+ LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
EmitCopy(E->getLHS()->getType(),
AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
@@ -1205,7 +1208,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;
// C++ objects with a user-declared constructor don't need zero'ing.
- if (CGF.getContext().getLangOpts().CPlusPlus)
+ if (CGF.getLangOpts().CPlusPlus)
if (const RecordType *RT = CGF.getContext()
.getBaseElementType(E->getType())->getAs<RecordType>()) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
@@ -1271,10 +1274,11 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
llvm::Value *SrcPtr, QualType Ty,
bool isVolatile,
- CharUnits alignment) {
+ CharUnits alignment,
+ bool isAssignment) {
assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
- if (getContext().getLangOpts().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
assert((Record->hasTrivialCopyConstructor() ||
@@ -1300,9 +1304,13 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
// implementation handles this case safely. If there is a libc that does not
// safely handle this, we can add a target hook.
- // Get size and alignment info for this aggregate.
- std::pair<CharUnits, CharUnits> TypeInfo =
- getContext().getTypeInfoInChars(Ty);
+ // Get data size and alignment info for this aggregate. If this is an
+ // assignment, don't copy the tail padding; otherwise copying it is fine.
+ std::pair<CharUnits, CharUnits> TypeInfo;
+ if (isAssignment)
+ TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
+ else
+ TypeInfo = getContext().getTypeInfoInChars(Ty);
if (alignment.isZero())
alignment = TypeInfo.second;
@@ -1359,11 +1367,17 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
}
}
}
+
+ // Determine the metadata to describe the position of any padding in this
+ // memcpy, as well as the TBAA tags for the members of the struct, in case
+ // the optimizer wishes to expand it into scalar memory operations.
+ llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty);
Builder.CreateMemCpy(DestPtr, SrcPtr,
llvm::ConstantInt::get(IntPtrTy,
TypeInfo.first.getQuantity()),
- alignment.getQuantity(), isVolatile);
+ alignment.getQuantity(), isVolatile,
+ /*TBAATag=*/0, TBAAStructTag);
}
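Illustration (not part of this patch) of why assignment must not copy the tail padding: under the Itanium ABI a derived class may place its own members in a non-POD base's tail padding, and a full-size memcpy of the base would clobber them.

    struct Base {
      Base() {}  // non-POD, so its tail padding may be reused by a derived class
      long l;
      char c;    // data size 9; alloc size 16 on typical LP64 targets
    };
    struct Derived : Base { char d; }; // 'd' can land at offset 9, in Base's padding

    void assignBase(Derived &Dst, const Base &Src) {
      // The trivial assignment must copy only Base's 9 data bytes; copying
      // sizeof(Base) == 16 bytes would overwrite Dst.d.
      static_cast<Base &>(Dst) = Src;
    }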
void CodeGenFunction::MaybeEmitStdInitializerListCleanup(llvm::Value *loc,
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index 31ea1b5..7f640f6 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -24,6 +24,7 @@ using namespace clang;
using namespace CodeGen;
RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
+ SourceLocation CallLoc,
llvm::Value *Callee,
ReturnValueSlot ReturnValue,
llvm::Value *This,
@@ -33,6 +34,13 @@ RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
assert(MD->isInstance() &&
"Trying to emit a member call expr on a static method!");
+ // C++11 [class.mfct.non-static]p2:
+ // If a non-static member function of a class X is called for an object that
+ // is not of type X, or of a type derived from X, the behavior is undefined.
+ EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
+ : TCK_MemberCall,
+ CallLoc, This, getContext().getRecordType(MD->getParent()));
+
CallArgList Args;
// Push the this ptr.
@@ -168,8 +176,9 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
CGDebugInfo *DI = getDebugInfo();
- if (DI && CGM.getCodeGenOpts().DebugInfo == CodeGenOptions::LimitedDebugInfo
- && !isa<CallExpr>(ME->getBase())) {
+ if (DI &&
+ CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::LimitedDebugInfo &&
+ !isa<CallExpr>(ME->getBase())) {
QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
DI->getOrCreateRecordType(PTy->getPointeeType(),
@@ -235,7 +244,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
// We don't like to generate the trivial copy/move assignment operator
// when it isn't necessary; just produce the proper effect here.
llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
- EmitAggregateCopy(This, RHS, CE->getType());
+ EmitAggregateAssign(This, RHS, CE->getType());
return RValue::get(This);
}
@@ -251,16 +260,16 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
}
// Compute the function type we're calling.
+ const CXXMethodDecl *CalleeDecl = DevirtualizedMethod ? DevirtualizedMethod : MD;
const CGFunctionInfo *FInfo = 0;
- if (isa<CXXDestructorDecl>(MD))
- FInfo = &CGM.getTypes().arrangeCXXDestructor(cast<CXXDestructorDecl>(MD),
+ if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
+ FInfo = &CGM.getTypes().arrangeCXXDestructor(Dtor,
Dtor_Complete);
- else if (isa<CXXConstructorDecl>(MD))
- FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(
- cast<CXXConstructorDecl>(MD),
- Ctor_Complete);
+ else if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(CalleeDecl))
+ FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor,
+ Ctor_Complete);
else
- FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(MD);
+ FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);
llvm::Type *Ty = CGM.getTypes().GetFunctionType(*FInfo);
@@ -277,7 +286,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
if (UseVirtualCall) {
Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
} else {
- if (getContext().getLangOpts().AppleKext &&
+ if (getLangOpts().AppleKext &&
MD->isVirtual() &&
ME->hasQualifier())
Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
@@ -295,7 +304,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
} else if (UseVirtualCall) {
Callee = BuildVirtualCall(MD, This, Ty);
} else {
- if (getContext().getLangOpts().AppleKext &&
+ if (getLangOpts().AppleKext &&
MD->isVirtual() &&
ME->hasQualifier())
Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
@@ -306,8 +315,8 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
}
}
- return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
- CE->arg_begin(), CE->arg_end());
+ return EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
+ /*VTT=*/0, CE->arg_begin(), CE->arg_end());
}
RValue
@@ -337,6 +346,9 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
else
This = EmitLValue(BaseExpr).getAddress();
+ EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This,
+ QualType(MPT->getClass(), 0));
+
// Ask the ABI to load the callee. Note that This is modified.
llvm::Value *Callee =
CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);
@@ -370,13 +382,13 @@ CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
MD->isTrivial()) {
llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
QualType Ty = E->getType();
- EmitAggregateCopy(This, Src, Ty);
+ EmitAggregateAssign(This, Src, Ty);
return RValue::get(This);
}
llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
- return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
- E->arg_begin() + 1, E->arg_end());
+ return EmitCXXMemberCall(MD, E->getExprLoc(), Callee, ReturnValue, This,
+ /*VTT=*/0, E->arg_begin() + 1, E->arg_end());
}
RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
@@ -457,7 +469,7 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
// Elide the constructor if we're constructing from a temporary.
// The temporary check is required because Sema sets this on NRVO
// returns.
- if (getContext().getLangOpts().ElideConstructors && E->isElidable()) {
+ if (getLangOpts().ElideConstructors && E->isElidable()) {
assert(getContext().hasSameUnqualifiedType(E->getType(),
E->getArg(0)->getType()));
if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
@@ -878,7 +890,7 @@ CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
if (constNum->getZExtValue() <= initializerElements) {
// If there was a cleanup, deactivate it.
if (cleanupDominator)
- DeactivateCleanupBlock(cleanup, cleanupDominator);;
+ DeactivateCleanupBlock(cleanup, cleanupDominator);
return;
}
} else {
@@ -949,7 +961,6 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
if (E->isArray()) {
if (const CXXConstructExpr *CCE = dyn_cast_or_null<CXXConstructExpr>(Init)){
CXXConstructorDecl *Ctor = CCE->getConstructor();
- bool RequiresZeroInitialization = false;
if (Ctor->isTrivial()) {
// If new expression did not specify value-initialization, then there
// is no initialization.
@@ -962,13 +973,11 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
return;
}
-
- RequiresZeroInitialization = true;
}
CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
CCE->arg_begin(), CCE->arg_end(),
- RequiresZeroInitialization);
+ CCE->requiresZeroInitialization());
return;
} else if (Init && isa<ImplicitValueInitExpr>(Init) &&
CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
@@ -1230,8 +1239,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::BasicBlock *contBB = 0;
llvm::Value *allocation = RV.getScalarVal();
- unsigned AS =
- cast<llvm::PointerType>(allocation->getType())->getAddressSpace();
+ unsigned AS = allocation->getType()->getPointerAddressSpace();
// The null-check means that the initializer is conditionally
// evaluated.
@@ -1377,8 +1385,14 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
if (UseGlobalDelete) {
// If we're supposed to call the global delete, make sure we do so
// even if the destructor throws.
+
+ // Derive the complete-object pointer, which is what we need
+ // to pass to the deallocation function.
+ llvm::Value *completePtr =
+ CGF.CGM.getCXXABI().adjustToCompleteObject(CGF, Ptr, ElementType);
+
CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
- Ptr, OperatorDelete,
+ completePtr, OperatorDelete,
ElementType);
}
@@ -1390,8 +1404,9 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
= CGF.BuildVirtualCall(Dtor,
UseGlobalDelete? Dtor_Complete : Dtor_Deleting,
Ptr, Ty);
- CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
- 0, 0);
+ // FIXME: Provide a source location here.
+ CGF.EmitCXXMemberCall(Dtor, SourceLocation(), Callee, ReturnValueSlot(),
+ Ptr, /*VTT=*/0, 0, 0);
if (UseGlobalDelete) {
CGF.PopCleanupBlock();
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 0233745..66b6f86 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -427,6 +427,7 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
case CK_CopyAndAutoreleaseBlockObject:
+ case CK_BuiltinFnToFnPtr:
llvm_unreachable("invalid cast kind for complex value");
case CK_FloatingRealToComplex:
@@ -640,7 +641,7 @@ EmitCompoundAssign(const CompoundAssignOperator *E,
LValue LV = EmitCompoundAssignLValue(E, Func, Val);
// The result of an assignment in C is the assigned r-value.
- if (!CGF.getContext().getLangOpts().CPlusPlus)
+ if (!CGF.getLangOpts().CPlusPlus)
return Val;
// If the lvalue is non-volatile, return the computed value of the assignment.
@@ -675,7 +676,7 @@ ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
LValue LV = EmitBinAssignLValue(E, Val);
// The result of an assignment in C is the assigned r-value.
- if (!CGF.getContext().getLangOpts().CPlusPlus)
+ if (!CGF.getLangOpts().CPlusPlus)
return Val;
// If the lvalue is non-volatile, return the computed value of the assignment.
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index a17a436..206f74a 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -24,7 +24,7 @@
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace clang;
using namespace CodeGen;
@@ -79,12 +79,12 @@ private:
CharUnits getAlignment(const llvm::Constant *C) const {
if (Packed) return CharUnits::One();
return CharUnits::fromQuantity(
- CGM.getTargetData().getABITypeAlignment(C->getType()));
+ CGM.getDataLayout().getABITypeAlignment(C->getType()));
}
CharUnits getSizeInChars(const llvm::Constant *C) const {
return CharUnits::fromQuantity(
- CGM.getTargetData().getTypeAllocSize(C->getType()));
+ CGM.getDataLayout().getTypeAllocSize(C->getType()));
}
};
@@ -204,7 +204,7 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
if (!FitsCompletelyInPreviousByte) {
unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;
- if (CGM.getTargetData().isBigEndian()) {
+ if (CGM.getDataLayout().isBigEndian()) {
Tmp = Tmp.lshr(NewFieldWidth);
Tmp = Tmp.trunc(BitsInPreviousByte);
@@ -220,7 +220,7 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
}
Tmp = Tmp.zext(CharWidth);
- if (CGM.getTargetData().isBigEndian()) {
+ if (CGM.getDataLayout().isBigEndian()) {
if (FitsCompletelyInPreviousByte)
Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
} else {
@@ -269,7 +269,7 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
while (FieldValue.getBitWidth() > CharWidth) {
llvm::APInt Tmp;
- if (CGM.getTargetData().isBigEndian()) {
+ if (CGM.getDataLayout().isBigEndian()) {
// We want the high bits.
Tmp =
FieldValue.lshr(FieldValue.getBitWidth() - CharWidth).trunc(CharWidth);
@@ -292,7 +292,7 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
"Should not have more than a byte left!");
if (FieldValue.getBitWidth() < CharWidth) {
- if (CGM.getTargetData().isBigEndian()) {
+ if (CGM.getDataLayout().isBigEndian()) {
unsigned BitWidth = FieldValue.getBitWidth();
FieldValue = FieldValue.zext(CharWidth) << (CharWidth - BitWidth);
@@ -337,7 +337,7 @@ void ConstStructBuilder::ConvertStructToPacked() {
llvm::Constant *C = Elements[i];
CharUnits ElementAlign = CharUnits::fromQuantity(
- CGM.getTargetData().getABITypeAlignment(C->getType()));
+ CGM.getDataLayout().getABITypeAlignment(C->getType()));
CharUnits AlignedElementOffsetInChars =
ElementOffsetInChars.RoundUpToAlignment(ElementAlign);
@@ -379,7 +379,7 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
unsigned FieldNo = 0;
unsigned ElementNo = 0;
const FieldDecl *LastFD = 0;
- bool IsMsStruct = RD->hasAttr<MsStructAttr>();
+ bool IsMsStruct = RD->isMsStruct(CGM.getContext());
for (RecordDecl::field_iterator Field = RD->field_begin(),
FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
@@ -478,7 +478,7 @@ void ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
unsigned FieldNo = 0;
const FieldDecl *LastFD = 0;
- bool IsMsStruct = RD->hasAttr<MsStructAttr>();
+ bool IsMsStruct = RD->isMsStruct(CGM.getContext());
uint64_t OffsetBits = CGM.getContext().toBits(Offset);
for (RecordDecl::field_iterator Field = RD->field_begin(),
@@ -665,8 +665,8 @@ public:
SmallVector<llvm::Type*, 2> Types;
Elts.push_back(C);
Types.push_back(C->getType());
- unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
- unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(destType);
+ unsigned CurSize = CGM.getDataLayout().getTypeAllocSize(C->getType());
+ unsigned TotalSize = CGM.getDataLayout().getTypeAllocSize(destType);
assert(CurSize <= TotalSize && "Union size mismatch!");
if (unsigned NumPadBytes = TotalSize - CurSize) {
@@ -691,6 +691,9 @@ public:
case CK_Dependent: llvm_unreachable("saw dependent cast!");
+ case CK_BuiltinFnToFnPtr:
+ llvm_unreachable("builtin functions are handled elsewhere");
+
case CK_ReinterpretMemberPointer:
case CK_DerivedToBaseMemberPointer:
case CK_BaseToDerivedMemberPointer:
@@ -811,11 +814,7 @@ public:
return llvm::ConstantArray::get(AType, Elts);
}
- llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
- return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
- }
-
- llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
+ llvm::Constant *EmitRecordInitialization(InitListExpr *ILE) {
return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
}
@@ -828,10 +827,7 @@ public:
return EmitArrayInitialization(ILE);
if (ILE->getType()->isRecordType())
- return EmitStructInitialization(ILE);
-
- if (ILE->getType()->isUnionType())
- return EmitUnionInitialization(ILE);
+ return EmitRecordInitialization(ILE);
return 0;
}
@@ -999,6 +995,9 @@ public:
T = Typeid->getExprOperand()->getType();
return CGM.GetAddrOfRTTIDescriptor(T);
}
+ case Expr::CXXUuidofExprClass: {
+ return CGM.GetAddrOfUuidDescriptor(cast<CXXUuidofExpr>(E));
+ }
}
return 0;
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 1cccafe..b429b1d 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -28,7 +28,7 @@
#include "llvm/Intrinsics.h"
#include "llvm/Module.h"
#include "llvm/Support/CFG.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include <cstdarg>
using namespace clang;
@@ -45,6 +45,7 @@ struct BinOpInfo {
Value *RHS;
QualType Ty; // Computation Type.
BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
+  bool FPContractable;           // Whether this op may be fused (e.g. into an fmuladd).
const Expr *E; // Entire expr, for error unsupported. May not be binop.
};
@@ -80,7 +81,11 @@ public:
llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
- LValue EmitCheckedLValue(const Expr *E) { return CGF.EmitCheckedLValue(E); }
+ LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
+ return CGF.EmitCheckedLValue(E, TCK);
+ }
+
+ void EmitBinOpCheck(Value *Check, const BinOpInfo &Info);
Value *EmitLoadOfLValue(LValue LV) {
return CGF.EmitLoadOfLValue(LV).getScalarVal();
@@ -90,13 +95,19 @@ public:
/// value l-value, this method emits the address of the l-value, then loads
/// and returns the result.
Value *EmitLoadOfLValue(const Expr *E) {
- return EmitLoadOfLValue(EmitCheckedLValue(E));
+ return EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load));
}
/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *EmitConversionToBool(Value *Src, QualType DstTy);
+ /// \brief Emit a check that a conversion to or from a floating-point type
+ /// does not overflow.
+ void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
+ Value *Src, QualType SrcType,
+ QualType DstType, llvm::Type *DstTy);
+
/// EmitScalarConversion - Emit a conversion from the specified type to the
/// specified destination type, both of which are LLVM scalar types.
Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy);
@@ -391,34 +402,26 @@ public:
// Binary Operators.
Value *EmitMul(const BinOpInfo &Ops) {
if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
- switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
- case LangOptions::SOB_Undefined:
- return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
+ switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Defined:
return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
+ case LangOptions::SOB_Undefined:
+ if (!CGF.getLangOpts().SanitizeSignedIntegerOverflow)
+ return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
+ // Fall through.
case LangOptions::SOB_Trapping:
return EmitOverflowCheckedBinOp(Ops);
}
}
-
+
if (Ops.LHS->getType()->isFPOrFPVectorTy())
return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
}
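
In source terms, the rewritten switch selects one lowering per signed-overflow
mode and lets the new sanitizer reuse the trapping path. A sketch of the
mapping (the driver flag spellings are the usual ones, noted here as context):

    // Lowering of a signed 'a * b' chosen by the switch above (sketch):
    //   -fwrapv   (SOB_Defined)  : plain 'mul' -- wrapping is well defined
    //   (default) (SOB_Undefined): 'mul nsw'; with the signed-integer-overflow
    //                              sanitizer enabled, fall through to the
    //                              checked multiply instead
    //   -ftrapv   (SOB_Trapping) : overflow-checked multiply + trap
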
- bool isTrapvOverflowBehavior() {
- return CGF.getContext().getLangOpts().getSignedOverflowBehavior()
- == LangOptions::SOB_Trapping;
- }
/// Create a binary op that checks for overflow.
/// Currently only supports +, - and *.
Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
- // Emit the overflow BB when -ftrapv option is activated.
- void EmitOverflowBB(llvm::BasicBlock *overflowBB) {
- Builder.SetInsertPoint(overflowBB);
- llvm::Function *Trap = CGF.CGM.getIntrinsic(llvm::Intrinsic::trap);
- Builder.CreateCall(Trap);
- Builder.CreateUnreachable();
- }
+
// Check for undefined division and modulus behaviors.
void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
llvm::Value *Zero,bool isDiv);
@@ -537,6 +540,110 @@ Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
return EmitPointerToBoolConversion(Src);
}
+void ScalarExprEmitter::EmitFloatConversionCheck(Value *OrigSrc,
+ QualType OrigSrcType,
+ Value *Src, QualType SrcType,
+ QualType DstType,
+ llvm::Type *DstTy) {
+ using llvm::APFloat;
+ using llvm::APSInt;
+
+ llvm::Type *SrcTy = Src->getType();
+
+ llvm::Value *Check = 0;
+ if (llvm::IntegerType *IntTy = dyn_cast<llvm::IntegerType>(SrcTy)) {
+ // Integer to floating-point. This can fail for unsigned short -> __half
+ // or unsigned __int128 -> float.
+ assert(DstType->isFloatingType());
+ bool SrcIsUnsigned = OrigSrcType->isUnsignedIntegerOrEnumerationType();
+
+ APFloat LargestFloat =
+ APFloat::getLargest(CGF.getContext().getFloatTypeSemantics(DstType));
+ APSInt LargestInt(IntTy->getBitWidth(), SrcIsUnsigned);
+
+ bool IsExact;
+ if (LargestFloat.convertToInteger(LargestInt, APFloat::rmTowardZero,
+ &IsExact) != APFloat::opOK)
+      // The range of representable values of this floating-point type includes
+      // all values of this integer type, so no overflow check is needed.
+ return;
+
+ llvm::Value *Max = llvm::ConstantInt::get(VMContext, LargestInt);
+ if (SrcIsUnsigned)
+ Check = Builder.CreateICmpULE(Src, Max);
+ else {
+ llvm::Value *Min = llvm::ConstantInt::get(VMContext, -LargestInt);
+ llvm::Value *GE = Builder.CreateICmpSGE(Src, Min);
+ llvm::Value *LE = Builder.CreateICmpSLE(Src, Max);
+ Check = Builder.CreateAnd(GE, LE);
+ }
+ } else {
+ // Floating-point to integer or floating-point to floating-point. This has
+ // undefined behavior if the source is +-Inf, NaN, or doesn't fit into the
+ // destination type.
+ const llvm::fltSemantics &SrcSema =
+ CGF.getContext().getFloatTypeSemantics(OrigSrcType);
+ APFloat MaxSrc(SrcSema, APFloat::uninitialized);
+ APFloat MinSrc(SrcSema, APFloat::uninitialized);
+
+ if (isa<llvm::IntegerType>(DstTy)) {
+ unsigned Width = CGF.getContext().getIntWidth(DstType);
+ bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();
+
+ APSInt Min = APSInt::getMinValue(Width, Unsigned);
+ if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
+ APFloat::opOverflow)
+ // Don't need an overflow check for lower bound. Just check for
+ // -Inf/NaN.
+ MinSrc = APFloat::getLargest(SrcSema, true);
+
+ APSInt Max = APSInt::getMaxValue(Width, Unsigned);
+ if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
+ APFloat::opOverflow)
+ // Don't need an overflow check for upper bound. Just check for
+ // +Inf/NaN.
+ MaxSrc = APFloat::getLargest(SrcSema, false);
+ } else {
+ const llvm::fltSemantics &DstSema =
+ CGF.getContext().getFloatTypeSemantics(DstType);
+ bool IsInexact;
+
+ MinSrc = APFloat::getLargest(DstSema, true);
+ if (MinSrc.convert(SrcSema, APFloat::rmTowardZero, &IsInexact) &
+ APFloat::opOverflow)
+ MinSrc = APFloat::getLargest(SrcSema, true);
+
+ MaxSrc = APFloat::getLargest(DstSema, false);
+ if (MaxSrc.convert(SrcSema, APFloat::rmTowardZero, &IsInexact) &
+ APFloat::opOverflow)
+ MaxSrc = APFloat::getLargest(SrcSema, false);
+ }
+
+ // If we're converting from __half, convert the range to float to match
+ // the type of src.
+ if (OrigSrcType->isHalfType()) {
+ const llvm::fltSemantics &Sema =
+ CGF.getContext().getFloatTypeSemantics(SrcType);
+ bool IsInexact;
+ MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
+ MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
+ }
+
+ llvm::Value *GE =
+ Builder.CreateFCmpOGE(Src, llvm::ConstantFP::get(VMContext, MinSrc));
+ llvm::Value *LE =
+ Builder.CreateFCmpOLE(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
+ Check = Builder.CreateAnd(GE, LE);
+ }
+
+ // FIXME: Provide a SourceLocation.
+ llvm::Constant *StaticArgs[] = {
+ CGF.EmitCheckTypeDescriptor(OrigSrcType),
+ CGF.EmitCheckTypeDescriptor(DstType)
+ };
+ CGF.EmitCheck(Check, "float_cast_overflow", StaticArgs, OrigSrc);
+}
+
/// EmitScalarConversion - Emit a conversion from the specified type to the
/// specified destination type, both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
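
The check derives, at compile time, the interval of source values that convert
without overflow, then emits a single range compare on the operand. A hedged
source-level sketch (hypothetical helper) of what the float_cast_overflow
handler would and would not report:

    void float_cast_examples(double d) {  // imagine d == 1e100 at runtime
      int   i = (int)d;       // 1e100 is outside [INT_MIN, INT_MAX]: reported
      float f = (float)d;     // 1e100 > FLT_MAX: reported
      long  l = (long)123.0;  // in range: silent
      (void)i; (void)f; (void)l;
    }
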
@@ -547,6 +654,8 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
if (DstType->isVoidType()) return 0;
+ llvm::Value *OrigSrc = Src;
+ QualType OrigSrcType = SrcType;
llvm::Type *SrcTy = Src->getType();
// Floating casts might be a bit special: if we're doing casts to / from half
@@ -620,6 +729,12 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
Value *Res = NULL;
llvm::Type *ResTy = DstTy;
+ // An overflowing conversion has undefined behavior if either the source type
+ // or the destination type is a floating-point type.
+ if (CGF.getLangOpts().SanitizeFloatCastOverflow &&
+ (OrigSrcType->isFloatingType() || DstType->isFloatingType()))
+ EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy);
+
// Cast to half via float
if (DstType->isHalfType())
DstTy = CGF.FloatTy;
@@ -686,6 +801,54 @@ Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
return llvm::Constant::getNullValue(ConvertType(Ty));
}
+/// \brief Emit a sanitization check for the given "binary" operation (which
+/// might actually be a unary increment which has been lowered to a binary
+/// operation). The check passes if \p Check, which is an \c i1, is \c true.
+void ScalarExprEmitter::EmitBinOpCheck(Value *Check, const BinOpInfo &Info) {
+ StringRef CheckName;
+ llvm::SmallVector<llvm::Constant *, 4> StaticData;
+ llvm::SmallVector<llvm::Value *, 2> DynamicData;
+
+ BinaryOperatorKind Opcode = Info.Opcode;
+ if (BinaryOperator::isCompoundAssignmentOp(Opcode))
+ Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);
+
+ StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
+ const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
+ if (UO && UO->getOpcode() == UO_Minus) {
+ CheckName = "negate_overflow";
+ StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
+ DynamicData.push_back(Info.RHS);
+ } else {
+ if (BinaryOperator::isShiftOp(Opcode)) {
+ // Shift LHS negative or too large, or RHS out of bounds.
+ CheckName = "shift_out_of_bounds";
+ const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
+ StaticData.push_back(
+ CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
+ StaticData.push_back(
+ CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
+ } else if (Opcode == BO_Div || Opcode == BO_Rem) {
+      // Divide or modulo by zero, or signed overflow (e.g. INT_MIN / -1).
+ CheckName = "divrem_overflow";
+ StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.E->getType()));
+ } else {
+ // Signed arithmetic overflow (+, -, *).
+ switch (Opcode) {
+ case BO_Add: CheckName = "add_overflow"; break;
+ case BO_Sub: CheckName = "sub_overflow"; break;
+ case BO_Mul: CheckName = "mul_overflow"; break;
+ default: llvm_unreachable("unexpected opcode for bin op check");
+ }
+ StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.E->getType()));
+ }
+ DynamicData.push_back(Info.LHS);
+ DynamicData.push_back(Info.RHS);
+ }
+
+ CGF.EmitCheck(Check, CheckName, StaticData, DynamicData);
+}
+
//===----------------------------------------------------------------------===//
// Visitor Methods
//===----------------------------------------------------------------------===//
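
Each branch above only selects a handler name plus the data to report; the
actual compare is supplied by the caller. A sketch of which undefined operation
reaches which handler, with the relevant sanitizers enabled (handler names
taken from the code above; 32-bit int assumed):

    int neg(int x)        { return -x; }      // x == INT_MIN          -> negate_overflow
    int shl(int x, int s) { return x << s; }  // s >= 32, or bad LHS   -> shift_out_of_bounds
    int quo(int x, int y) { return x / y; }   // y == 0, or INT_MIN/-1 -> divrem_overflow
    int sum(int x, int y) { return x + y; }   // signed wrap           -> add_overflow
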
@@ -802,7 +965,8 @@ Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
// debug info size.
CGDebugInfo *DI = CGF.getDebugInfo();
if (DI &&
- CGF.CGM.getCodeGenOpts().DebugInfo == CodeGenOptions::LimitedDebugInfo) {
+ CGF.CGM.getCodeGenOpts().getDebugInfo()
+ == CodeGenOptions::LimitedDebugInfo) {
QualType PQTy = E->getBase()->IgnoreParenImpCasts()->getType();
if (const PointerType * PTy = dyn_cast<PointerType>(PQTy))
if (FieldDecl *M = dyn_cast<FieldDecl>(E->getMemberDecl()))
@@ -1032,7 +1196,9 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
// are in the same order as in the CastKind enum.
switch (Kind) {
case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
-
+ case CK_BuiltinFnToFnPtr:
+ llvm_unreachable("builtin functions are handled elsewhere");
+
case CK_LValueBitCast:
case CK_ObjCObjectLValueCast: {
Value *V = EmitLValue(E).getAddress();
@@ -1055,19 +1221,18 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
return Visit(const_cast<Expr*>(E));
case CK_BaseToDerived: {
- const CXXRecordDecl *DerivedClassDecl =
- DestTy->getCXXRecordDeclForPointerType();
-
- return CGF.GetAddressOfDerivedClass(Visit(E), DerivedClassDecl,
+ const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
+ assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
+
+ return CGF.GetAddressOfDerivedClass(Visit(E), DerivedClassDecl,
CE->path_begin(), CE->path_end(),
ShouldNullCheckClassCastValue(CE));
}
case CK_UncheckedDerivedToBase:
case CK_DerivedToBase: {
- const RecordType *DerivedClassTy =
- E->getType()->getAs<PointerType>()->getPointeeType()->getAs<RecordType>();
- CXXRecordDecl *DerivedClassDecl =
- cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+ const CXXRecordDecl *DerivedClassDecl =
+ E->getType()->getPointeeCXXRecordDecl();
+ assert(DerivedClassDecl && "DerivedToBase arg isn't a C++ object pointer!");
return CGF.GetAddressOfBaseClass(Visit(E), DerivedClassDecl,
CE->path_begin(), CE->path_end(),
@@ -1248,17 +1413,20 @@ llvm::Value *ScalarExprEmitter::
EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
llvm::Value *InVal,
llvm::Value *NextVal, bool IsInc) {
- switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
- case LangOptions::SOB_Undefined:
- return Builder.CreateNSWAdd(InVal, NextVal, IsInc ? "inc" : "dec");
+ switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Defined:
return Builder.CreateAdd(InVal, NextVal, IsInc ? "inc" : "dec");
+ case LangOptions::SOB_Undefined:
+ if (!CGF.getLangOpts().SanitizeSignedIntegerOverflow)
+ return Builder.CreateNSWAdd(InVal, NextVal, IsInc ? "inc" : "dec");
+ // Fall through.
case LangOptions::SOB_Trapping:
BinOpInfo BinOp;
BinOp.LHS = InVal;
BinOp.RHS = NextVal;
BinOp.Ty = E->getType();
BinOp.Opcode = BO_Add;
+ BinOp.FPContractable = false;
BinOp.E = E;
return EmitOverflowCheckedBinOp(BinOp);
}
@@ -1300,7 +1468,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// Most common case by far: integer increment.
} else if (type->isIntegerType()) {
- llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
+ llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
// Note that signed integer inc/dec with width less than int can't
// overflow because of promotion rules; we're just eliding a few steps here.
@@ -1320,7 +1488,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
= CGF.getContext().getAsVariableArrayType(type)) {
llvm::Value *numElts = CGF.getVLASize(vla).first;
if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
- if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
+ if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, numElts, "vla.inc");
else
value = Builder.CreateInBoundsGEP(value, numElts, "vla.inc");
@@ -1330,7 +1498,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *amt = Builder.getInt32(amount);
value = CGF.EmitCastToVoidPtr(value);
- if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
+ if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, amt, "incdec.funcptr");
else
value = Builder.CreateInBoundsGEP(value, amt, "incdec.funcptr");
@@ -1339,7 +1507,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// For everything else, we can just do a simple increment.
} else {
llvm::Value *amt = Builder.getInt32(amount);
- if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
+ if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, amt, "incdec.ptr");
else
value = Builder.CreateInBoundsGEP(value, amt, "incdec.ptr");
@@ -1400,7 +1568,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *sizeValue =
llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
- if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
+ if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
else
value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
@@ -1444,6 +1612,7 @@ Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
BinOp.Ty = E->getType();
BinOp.Opcode = BO_Sub;
+ BinOp.FPContractable = false;
BinOp.E = E;
return EmitSub(BinOp);
}
@@ -1652,6 +1821,7 @@ BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
Result.RHS = Visit(E->getRHS());
Result.Ty = E->getType();
Result.Opcode = E->getOpcode();
+ Result.FPContractable = E->isFPContractable();
Result.E = E;
return Result;
}
@@ -1678,9 +1848,10 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
OpInfo.RHS = Visit(E->getRHS());
OpInfo.Ty = E->getComputationResultType();
OpInfo.Opcode = E->getOpcode();
+ OpInfo.FPContractable = false;
OpInfo.E = E;
// Load/convert the LHS.
- LValue LHSLV = EmitCheckedLValue(E->getLHS());
+ LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
OpInfo.LHS = EmitLoadOfLValue(LHSLV);
llvm::PHINode *atomicPHI = 0;
@@ -1740,7 +1911,7 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
return 0;
// The result of an assignment in C is the assigned r-value.
- if (!CGF.getContext().getLangOpts().CPlusPlus)
+ if (!CGF.getLangOpts().CPlusPlus)
return RHS;
// If the lvalue is non-volatile, return the computed value of the assignment.
@@ -1752,56 +1923,44 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
}
void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
- const BinOpInfo &Ops,
- llvm::Value *Zero, bool isDiv) {
- llvm::Function::iterator insertPt = Builder.GetInsertBlock();
- llvm::BasicBlock *contBB =
- CGF.createBasicBlock(isDiv ? "div.cont" : "rem.cont", CGF.CurFn,
- llvm::next(insertPt));
- llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
+ const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
+ llvm::Value *Cond = 0;
+
+ if (CGF.getLangOpts().SanitizeDivideByZero)
+ Cond = Builder.CreateICmpNE(Ops.RHS, Zero);
- llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
+ if (CGF.getLangOpts().SanitizeSignedIntegerOverflow &&
+ Ops.Ty->hasSignedIntegerRepresentation()) {
+ llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
- if (Ops.Ty->hasSignedIntegerRepresentation()) {
llvm::Value *IntMin =
Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);
- llvm::Value *Cond1 = Builder.CreateICmpEQ(Ops.RHS, Zero);
- llvm::Value *LHSCmp = Builder.CreateICmpEQ(Ops.LHS, IntMin);
- llvm::Value *RHSCmp = Builder.CreateICmpEQ(Ops.RHS, NegOne);
- llvm::Value *Cond2 = Builder.CreateAnd(LHSCmp, RHSCmp, "and");
- Builder.CreateCondBr(Builder.CreateOr(Cond1, Cond2, "or"),
- overflowBB, contBB);
- } else {
- CGF.Builder.CreateCondBr(Builder.CreateICmpEQ(Ops.RHS, Zero),
- overflowBB, contBB);
+ llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
+ llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
+ llvm::Value *Overflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
+ Cond = Cond ? Builder.CreateAnd(Cond, Overflow, "and") : Overflow;
}
- EmitOverflowBB(overflowBB);
- Builder.SetInsertPoint(contBB);
+
+ if (Cond)
+ EmitBinOpCheck(Cond, Ops);
}
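
The explicit overflow blocks are gone: both undefined cases of signed division
now fold into one condition handed to EmitBinOpCheck, roughly
(RHS != 0) && !(LHS == INT_MIN && RHS == -1). A sketch of the two cases it
rejects, assuming 32-bit int:

    #include <climits>
    int div_examples(int x, int y) {
      int a = x / 0;         // divide by zero: caught by the first half
      int b = INT_MIN / -1;  // the only signed division that overflows: second half
      return a + b;
    }
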
Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
- if (isTrapvOverflowBehavior()) {
+ if (CGF.getLangOpts().SanitizeDivideByZero ||
+ CGF.getLangOpts().SanitizeSignedIntegerOverflow) {
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
if (Ops.Ty->isIntegerType())
EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
- else if (Ops.Ty->isRealFloatingType()) {
- llvm::Function::iterator insertPt = Builder.GetInsertBlock();
- llvm::BasicBlock *DivCont = CGF.createBasicBlock("div.cont", CGF.CurFn,
- llvm::next(insertPt));
- llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow",
- CGF.CurFn);
- CGF.Builder.CreateCondBr(Builder.CreateFCmpOEQ(Ops.RHS, Zero),
- overflowBB, DivCont);
- EmitOverflowBB(overflowBB);
- Builder.SetInsertPoint(DivCont);
- }
+ else if (CGF.getLangOpts().SanitizeDivideByZero &&
+ Ops.Ty->isRealFloatingType())
+ EmitBinOpCheck(Builder.CreateFCmpUNE(Ops.RHS, Zero), Ops);
}
if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
- if (CGF.getContext().getLangOpts().OpenCL) {
+ if (CGF.getLangOpts().OpenCL) {
// OpenCL 1.1 7.4: minimum accuracy of single precision / is 2.5ulp
llvm::Type *ValTy = Val->getType();
if (ValTy->isFloatTy() ||
@@ -1819,7 +1978,7 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
// Rem in C can't be a floating point type: C99 6.5.5p2.
- if (isTrapvOverflowBehavior()) {
+ if (CGF.getLangOpts().SanitizeDivideByZero) {
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
if (Ops.Ty->isIntegerType())
@@ -1866,6 +2025,19 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
+ // Handle overflow with llvm.trap if no custom handler has been specified.
+ const std::string *handlerName =
+ &CGF.getLangOpts().OverflowHandler;
+ if (handlerName->empty()) {
+ // If the signed-integer-overflow sanitizer is enabled, emit a call to its
+ // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
+ if (CGF.getLangOpts().SanitizeSignedIntegerOverflow)
+ EmitBinOpCheck(Builder.CreateNot(overflow), Ops);
+ else
+ CGF.EmitTrapvCheck(Builder.CreateNot(overflow));
+ return result;
+ }
+
// Branch in case of overflow.
llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
llvm::Function::iterator insertPt = initialBB;
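
With no -ftrapv-handler configured, the overflow flag from the intrinsic now
feeds the sanitizer or a trap directly, with no extra basic block. What the
intrinsic pair computes, phrased with a Clang/GCC builtin (a sketch of the
semantics, not the actual lowering):

    // Mirrors llvm.sadd.with.overflow: a result plus an i1 overflow flag.
    bool checked_add(int a, int b, int *out) {
      return __builtin_add_overflow(a, b, out);  // true when a + b wrapped
    }
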
@@ -1875,15 +2047,6 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
Builder.CreateCondBr(overflow, overflowBB, continueBB);
- // Handle overflow with llvm.trap.
- const std::string *handlerName =
- &CGF.getContext().getLangOpts().OverflowHandler;
- if (handlerName->empty()) {
- EmitOverflowBB(overflowBB);
- Builder.SetInsertPoint(continueBB);
- return result;
- }
-
// If an overflow handler is set, then we want to call it and then use its
// result, if it returns.
Builder.SetInsertPoint(overflowBB);
@@ -2001,24 +2164,106 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
return CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
}
+// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
+// Addend. Use negMul and negAdd to negate the first operand of the Mul or
+// the add operand respectively. This allows fmuladd to represent a*b-c, or
+// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
+// efficient operations.
+static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend,
+ const CodeGenFunction &CGF, CGBuilderTy &Builder,
+ bool negMul, bool negAdd) {
+ assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");
+
+ Value *MulOp0 = MulOp->getOperand(0);
+ Value *MulOp1 = MulOp->getOperand(1);
+ if (negMul) {
+ MulOp0 =
+ Builder.CreateFSub(
+ llvm::ConstantFP::getZeroValueForNegation(MulOp0->getType()), MulOp0,
+ "neg");
+ } else if (negAdd) {
+ Addend =
+ Builder.CreateFSub(
+ llvm::ConstantFP::getZeroValueForNegation(Addend->getType()), Addend,
+ "neg");
+ }
+
+ Value *FMulAdd =
+ Builder.CreateCall3(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
+ MulOp0, MulOp1, Addend);
+ MulOp->eraseFromParent();
+
+ return FMulAdd;
+}
+
+// Check whether it would be legal to emit an fmuladd intrinsic call to
+// represent op and if so, build the fmuladd.
+//
+// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
+// Does NOT check the type of the operation - it's assumed that this function
+// will be called from contexts where it's known that the type is contractable.
+static Value* tryEmitFMulAdd(const BinOpInfo &op,
+ const CodeGenFunction &CGF, CGBuilderTy &Builder,
+ bool isSub=false) {
+
+ assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
+ op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
+ "Only fadd/fsub can be the root of an fmuladd.");
+
+ // Check whether this op is marked as fusable.
+ if (!op.FPContractable)
+ return 0;
+
+ // Check whether -ffp-contract=on. (If -ffp-contract=off/fast, fusing is
+ // either disabled, or handled entirely by the LLVM backend).
+ if (CGF.getLangOpts().getFPContractMode() != LangOptions::FPC_On)
+ return 0;
+
+ // We have a potentially fusable op. Look for a mul on one of the operands.
+ if (llvm::BinaryOperator* LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
+ if (LHSBinOp->getOpcode() == llvm::Instruction::FMul) {
+ assert(LHSBinOp->getNumUses() == 0 &&
+ "Operations with multiple uses shouldn't be contracted.");
+ return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
+ }
+ } else if (llvm::BinaryOperator* RHSBinOp =
+ dyn_cast<llvm::BinaryOperator>(op.RHS)) {
+ if (RHSBinOp->getOpcode() == llvm::Instruction::FMul) {
+ assert(RHSBinOp->getNumUses() == 0 &&
+ "Operations with multiple uses shouldn't be contracted.");
+ return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
+ }
+ }
+
+ return 0;
+}
+
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
if (op.LHS->getType()->isPointerTy() ||
op.RHS->getType()->isPointerTy())
return emitPointerArithmetic(CGF, op, /*subtraction*/ false);
if (op.Ty->isSignedIntegerOrEnumerationType()) {
- switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
- case LangOptions::SOB_Undefined:
- return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
+ switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Defined:
return Builder.CreateAdd(op.LHS, op.RHS, "add");
+ case LangOptions::SOB_Undefined:
+ if (!CGF.getLangOpts().SanitizeSignedIntegerOverflow)
+ return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
+ // Fall through.
case LangOptions::SOB_Trapping:
return EmitOverflowCheckedBinOp(op);
}
}
- if (op.LHS->getType()->isFPOrFPVectorTy())
+ if (op.LHS->getType()->isFPOrFPVectorTy()) {
+ // Try to form an fmuladd.
+ if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
+ return FMulAdd;
+
return Builder.CreateFAdd(op.LHS, op.RHS, "add");
+ }
return Builder.CreateAdd(op.LHS, op.RHS, "add");
}
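
Contraction only applies within a single expression, matching the model of the
C FP_CONTRACT pragma; a sketch of what does and does not fuse under
-ffp-contract=on:

    // May become llvm.fmuladd:
    double f1(double a, double b, double c) { return a * b + c; } // fmuladd(a, b, c)
    double f2(double a, double b, double c) { return c - a * b; } // the negMul form
    // Not contractable: the multiply is a distinct expression statement.
    double f3(double a, double b, double c) { double t = a * b; return t + c; }
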
@@ -2027,18 +2272,24 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
// The LHS is always a pointer if either side is.
if (!op.LHS->getType()->isPointerTy()) {
if (op.Ty->isSignedIntegerOrEnumerationType()) {
- switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
- case LangOptions::SOB_Undefined:
- return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
+ switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Defined:
return Builder.CreateSub(op.LHS, op.RHS, "sub");
+ case LangOptions::SOB_Undefined:
+ if (!CGF.getLangOpts().SanitizeSignedIntegerOverflow)
+ return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
+ // Fall through.
case LangOptions::SOB_Trapping:
return EmitOverflowCheckedBinOp(op);
}
}
- if (op.LHS->getType()->isFPOrFPVectorTy())
+ if (op.LHS->getType()->isFPOrFPVectorTy()) {
+ // Try to form an fmuladd.
+ if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
+ return FMulAdd;
return Builder.CreateFSub(op.LHS, op.RHS, "sub");
+ }
return Builder.CreateSub(op.LHS, op.RHS, "sub");
}
@@ -2108,14 +2359,34 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
if (Ops.LHS->getType() != RHS->getType())
RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
- if (CGF.CatchUndefined
- && isa<llvm::IntegerType>(Ops.LHS->getType())) {
+ if (CGF.getLangOpts().SanitizeShift &&
+ isa<llvm::IntegerType>(Ops.LHS->getType())) {
unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth();
- llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
- CGF.Builder.CreateCondBr(Builder.CreateICmpULT(RHS,
- llvm::ConstantInt::get(RHS->getType(), Width)),
- Cont, CGF.getTrapBB());
- CGF.EmitBlock(Cont);
+ llvm::Value *WidthMinusOne =
+ llvm::ConstantInt::get(RHS->getType(), Width - 1);
+ // FIXME: Emit the branching explicitly rather than emitting the check
+ // twice.
+ EmitBinOpCheck(Builder.CreateICmpULE(RHS, WidthMinusOne), Ops);
+
+ if (Ops.Ty->hasSignedIntegerRepresentation()) {
+ // Check whether we are shifting any non-zero bits off the top of the
+ // integer.
+ llvm::Value *BitsShiftedOff =
+ Builder.CreateLShr(Ops.LHS,
+ Builder.CreateSub(WidthMinusOne, RHS, "shl.zeros",
+ /*NUW*/true, /*NSW*/true),
+ "shl.check");
+ if (CGF.getLangOpts().CPlusPlus) {
+ // In C99, we are not permitted to shift a 1 bit into the sign bit.
+ // Under C++11's rules, shifting a 1 bit into the sign bit is
+ // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
+ // define signed left shifts, so we use the C99 and C++11 rules there).
+ llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
+ BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
+ }
+ llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
+ EmitBinOpCheck(Builder.CreateICmpEQ(BitsShiftedOff, Zero), Ops);
+ }
}
return Builder.CreateShl(Ops.LHS, RHS, "shl");
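
Beyond the old exponent-range test, the signed case now also checks the bits
shifted off the top, with the C99/C++11 split described in the comment above.
An illustrative sketch, assuming 32-bit int:

    int s1(int x) { return x << 33; } // exponent >= width: reported in C and C++
    int s2()      { return 1 << 31; } // 1 into the sign bit: UB in C99, OK in C++11
    int s3()      { return 3 << 31; } // 1 past the sign bit: reported in both
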
@@ -2128,14 +2399,11 @@ Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
if (Ops.LHS->getType() != RHS->getType())
RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
- if (CGF.CatchUndefined
- && isa<llvm::IntegerType>(Ops.LHS->getType())) {
+ if (CGF.getLangOpts().SanitizeShift &&
+ isa<llvm::IntegerType>(Ops.LHS->getType())) {
unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth();
- llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
- CGF.Builder.CreateCondBr(Builder.CreateICmpULT(RHS,
- llvm::ConstantInt::get(RHS->getType(), Width)),
- Cont, CGF.getTrapBB());
- CGF.EmitBlock(Cont);
+ llvm::Value *WidthVal = llvm::ConstantInt::get(RHS->getType(), Width);
+ EmitBinOpCheck(Builder.CreateICmpULT(RHS, WidthVal), Ops);
}
if (Ops.Ty->hasUnsignedIntegerRepresentation())
@@ -2326,7 +2594,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
case Qualifiers::OCL_Weak:
RHS = Visit(E->getRHS());
- LHS = EmitCheckedLValue(E->getLHS());
+ LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
break;
@@ -2336,7 +2604,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
// __block variables need to have the rhs evaluated first, plus
// this should improve codegen just a little.
RHS = Visit(E->getRHS());
- LHS = EmitCheckedLValue(E->getLHS());
+ LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
// Store the value into the LHS. Bit-fields are handled specially
// because the result is altered by the store, i.e., [C99 6.5.16p1]
@@ -2353,7 +2621,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
return 0;
// The result of an assignment in C is the assigned r-value.
- if (!CGF.getContext().getLangOpts().CPlusPlus)
+ if (!CGF.getLangOpts().CPlusPlus)
return RHS;
// If the lvalue is non-volatile, return the computed value of the assignment.
@@ -2567,7 +2835,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
// OpenCL: If the condition is a vector, we can treat this condition like
// the select function.
- if (CGF.getContext().getLangOpts().OpenCL
+ if (CGF.getLangOpts().OpenCL
&& condExpr->getType()->isVectorType()) {
llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
llvm::Value *LHS = Visit(lhsExpr);
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index 4ac172d..c90e4ec 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -21,7 +21,7 @@
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Diagnostic.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/InlineAsm.h"
using namespace clang;
using namespace CodeGen;
@@ -440,8 +440,8 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
SourceLocation StartLoc) {
FunctionArgList args;
// Check if we should generate debug info for this method.
- if (CGM.getModuleDebugInfo() && !OMD->hasAttr<NoDebugAttr>())
- DebugInfo = CGM.getModuleDebugInfo();
+ if (!OMD->hasAttr<NoDebugAttr>())
+ maybeInitializeDebugInfo();
llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);
@@ -613,7 +613,16 @@ PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
// which translates to objc_storeStrong. This isn't required, but
// it's slightly nicer.
} else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
- Kind = Expression;
+ // Using standard expression emission for the setter is only
+ // acceptable if the ivar is __strong, which won't be true if
+ // the property is annotated with __attribute__((NSObject)).
+ // TODO: falling all the way back to objc_setProperty here is
+ // just laziness, though; we could still use objc_storeStrong
+ // if we hacked it right.
+ if (ivarType.getObjCLifetime() == Qualifiers::OCL_Strong)
+ Kind = Expression;
+ else
+ Kind = SetPropertyAndExpressionGet;
return;
// Otherwise, we need to at least use setProperty. However, if
@@ -801,6 +810,10 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
PropertyImplStrategy strategy(CGM, propImpl);
switch (strategy.getKind()) {
case PropertyImplStrategy::Native: {
+ // We don't need to do anything for a zero-size struct.
+ if (strategy.getIvarSize().isZero())
+ return;
+
LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
// Currently, all atomic accesses have to be through integer
@@ -1032,12 +1045,7 @@ static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
static bool UseOptimizedSetter(CodeGenModule &CGM) {
if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
return false;
- const TargetInfo &Target = CGM.getContext().getTargetInfo();
-
- if (Target.getPlatformName() != "macosx")
- return false;
-
- return Target.getPlatformMinVersion() >= VersionTuple(10, 8);
+ return CGM.getLangOpts().ObjCRuntime.hasOptimizedSetter();
}
void
@@ -1064,6 +1072,10 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
PropertyImplStrategy strategy(CGM, propImpl);
switch (strategy.getKind()) {
case PropertyImplStrategy::Native: {
+ // We don't need to do anything for a zero-size struct.
+ if (strategy.getIvarSize().isZero())
+ return;
+
llvm::Value *argAddr = LocalDeclMap[*setterMethod->param_begin()];
LValue ivarLValue =
@@ -1097,7 +1109,7 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
llvm::Value *setOptimizedPropertyFn = 0;
llvm::Value *setPropertyFn = 0;
if (UseOptimizedSetter(CGM)) {
- // 10.8 code and GC is off
+    // Targeting an OS X 10.8 / iOS 6.0 runtime, with GC off.
setOptimizedPropertyFn =
CGM.getObjCRuntime()
.GetOptimizedPropertySetFunction(strategy.isAtomic(),
@@ -1209,7 +1221,7 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
BinaryOperator assign(&ivarRef, finalArg, BO_Assign,
ivarRef.getType(), VK_RValue, OK_Ordinary,
- SourceLocation());
+ SourceLocation(), false);
EmitStmt(&assign);
}
@@ -1697,11 +1709,11 @@ static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
// references to the runtime support library. We don't really
// permit this to fail, but we need a particular relocation style.
if (llvm::Function *f = dyn_cast<llvm::Function>(fn)) {
- if (!CGM.getLangOpts().ObjCRuntime.hasARC())
+ if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC())
f->setLinkage(llvm::Function::ExternalWeakLinkage);
// set nonlazybind attribute for these APIs for performance.
if (fnName == "objc_retain" || fnName == "objc_release")
- f->addFnAttr(llvm::Attribute::NonLazyBind);
+ f->addFnAttr(llvm::Attributes::NonLazyBind);
}
return fn;
@@ -1945,6 +1957,28 @@ void CodeGenFunction::EmitARCRelease(llvm::Value *value, bool precise) {
}
}
+/// Destroy a __strong variable.
+///
+/// At -O0, emit a call to store 'null' into the address;
+/// instrumenting tools prefer this because the address is exposed,
+/// but it's relatively cumbersome to optimize.
+///
+/// At -O1 and above, just load and call objc_release.
+///
+/// call void \@objc_storeStrong(i8** %addr, i8* null)
+void CodeGenFunction::EmitARCDestroyStrong(llvm::Value *addr, bool precise) {
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ llvm::PointerType *addrTy = cast<llvm::PointerType>(addr->getType());
+ llvm::Value *null = llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(addrTy->getElementType()));
+ EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
+ return;
+ }
+
+ llvm::Value *value = Builder.CreateLoad(addr);
+ EmitARCRelease(value, precise);
+}
+
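
A sketch of the two lowerings, written against the public ARC runtime entry
points (the id-typed signatures are simplified to void* here):

    extern "C" void objc_storeStrong(void **addr, void *value);
    extern "C" void objc_release(void *value);

    void destroy_strong(void **addr, bool optzero) {
      if (optzero)
        objc_storeStrong(addr, nullptr); // -O0: releases the old value, keeps
                                         // the address visible to tools
      else
        objc_release(*addr);             // -O1 and up: plain load + release
    }
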
/// Store into a strong object. Always calls this:
/// call void \@objc_storeStrong(i8** %addr, i8* %value)
llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr,
@@ -2218,15 +2252,13 @@ void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
llvm::Value *addr,
QualType type) {
- llvm::Value *ptr = CGF.Builder.CreateLoad(addr, "strongdestroy");
- CGF.EmitARCRelease(ptr, /*precise*/ true);
+ CGF.EmitARCDestroyStrong(addr, /*precise*/ true);
}
void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
llvm::Value *addr,
QualType type) {
- llvm::Value *ptr = CGF.Builder.CreateLoad(addr, "strongdestroy");
- CGF.EmitARCRelease(ptr, /*precise*/ false);
+ CGF.EmitARCDestroyStrong(addr, /*precise*/ false);
}
void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF,
@@ -2730,7 +2762,7 @@ void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
// Keep track of the current cleanup stack depth.
RunCleanupsScope Scope(*this);
- if (CGM.getLangOpts().ObjCRuntime.hasARC()) {
+ if (CGM.getLangOpts().ObjCRuntime.hasNativeARC()) {
llvm::Value *token = EmitObjCAutoreleasePoolPush();
EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
} else {
@@ -2826,9 +2858,8 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
"__assign_helper_atomic_property_",
&CGM.getModule());
- if (CGM.getModuleDebugInfo())
- DebugInfo = CGM.getModuleDebugInfo();
-
+ // Initialize debug info if needed.
+ maybeInitializeDebugInfo();
StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
@@ -2845,8 +2876,8 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
Expr *Args[2] = { &DST, &SRC };
CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
CXXOperatorCallExpr TheCall(C, OO_Equal, CalleeExp->getCallee(),
- Args, 2, DestTy->getPointeeType(),
- VK_LValue, SourceLocation());
+ Args, DestTy->getPointeeType(),
+ VK_LValue, SourceLocation(), false);
EmitStmt(&TheCall);
@@ -2912,9 +2943,8 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
"__copy_helper_atomic_property_", &CGM.getModule());
- if (CGM.getModuleDebugInfo())
- DebugInfo = CGM.getModuleDebugInfo();
-
+ // Initialize debug info if needed.
+ maybeInitializeDebugInfo();
StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
@@ -2940,7 +2970,7 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
CXXConstructExpr::Create(C, Ty, SourceLocation(),
CXXConstExpr->getConstructor(),
CXXConstExpr->isElidable(),
- &ConstructorArgs[0], ConstructorArgs.size(),
+ ConstructorArgs,
CXXConstExpr->hadMultipleCandidates(),
CXXConstExpr->isListInitialization(),
CXXConstExpr->requiresZeroInitialization(),
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index 6d129d0..68d234d 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -33,7 +33,7 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Compiler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include <cstdarg>
@@ -224,6 +224,25 @@ protected:
llvm::ArrayType *ArrayTy = llvm::ArrayType::get(Ty, V.size());
return MakeGlobal(ArrayTy, V, Name, linkage);
}
+ /// Returns a property name and encoding string.
+ llvm::Constant *MakePropertyEncodingString(const ObjCPropertyDecl *PD,
+ const Decl *Container) {
+ ObjCRuntime R = CGM.getLangOpts().ObjCRuntime;
+ if ((R.getKind() == ObjCRuntime::GNUstep) &&
+ (R.getVersion() >= VersionTuple(1, 6))) {
+ std::string NameAndAttributes;
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container, TypeStr);
+ NameAndAttributes += '\0';
+ NameAndAttributes += TypeStr.length() + 3;
+ NameAndAttributes += TypeStr;
+ NameAndAttributes += '\0';
+ NameAndAttributes += PD->getNameAsString();
+ return llvm::ConstantExpr::getGetElementPtr(
+ CGM.GetAddrOfConstantString(NameAndAttributes), Zeros);
+ }
+ return MakeConstantString(PD->getNameAsString());
+ }
/// Ensures that the value has the required type, by inserting a bitcast if
/// required. This function lets us avoid inserting bitcasts that are
/// redundant.
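
For GNUstep >= 1.6 the bare property name is replaced by a packed record. A
standalone sketch of the layout built above ("Ti,N,Vfoo" is a hypothetical
encoding; the real string comes from getObjCEncodingForPropertyDecl):

    #include <string>
    std::string makePropertyRecord(const std::string &TypeStr,  // e.g. "Ti,N,Vfoo"
                                   const std::string &Name) {   // e.g. "foo"
      std::string S;
      S += '\0';                          // leading NUL marks the extended format
      S += (char)(TypeStr.length() + 3);  // one-byte length prefix
      S += TypeStr;                       // the property's type encoding
      S += '\0';
      S += Name;                          // the plain property name
      return S;
    }
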
@@ -514,7 +533,10 @@ public:
const CGBlockInfo &blockInfo) {
return NULLPtr;
}
-
+ virtual llvm::Constant *BuildRCBlockLayout(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ return NULLPtr;
+ }
virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name) {
return 0;
}
@@ -578,6 +600,8 @@ class CGObjCGNUstep : public CGObjCGNU {
/// Type of an slot structure pointer. This is returned by the various
/// lookup functions.
llvm::Type *SlotTy;
+ public:
+ virtual llvm::Constant *GetEHType(QualType T);
protected:
virtual llvm::Value *LookupIMP(CodeGenFunction &CGF,
llvm::Value *&Receiver,
@@ -653,11 +677,40 @@ class CGObjCGNUstep : public CGObjCGNU {
}
};
-/// The ObjFW runtime, which closely follows the GCC runtime's
-/// compiler ABI. Support here is due to Jonathan Schleifer, the
-/// ObjFW maintainer.
-class CGObjCObjFW : public CGObjCGCC {
- /// Emit class references unconditionally as direct symbol references.
+/// Support for the ObjFW runtime. Support here is due to
+/// Jonathan Schleifer <js@webkeks.org>, the ObjFW maintainer.
+class CGObjCObjFW: public CGObjCGNU {
+protected:
+ /// The GCC ABI message lookup function. Returns an IMP pointing to the
+ /// method implementation for this message.
+ LazyRuntimeFunction MsgLookupFn;
+ /// The GCC ABI superclass message lookup function. Takes a pointer to a
+ /// structure describing the receiver and the class, and a selector as
+ /// arguments. Returns the IMP for the corresponding method.
+ LazyRuntimeFunction MsgLookupSuperFn;
+
+ virtual llvm::Value *LookupIMP(CodeGenFunction &CGF,
+ llvm::Value *&Receiver,
+ llvm::Value *cmd,
+ llvm::MDNode *node) {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *args[] = {
+ EnforceType(Builder, Receiver, IdTy),
+ EnforceType(Builder, cmd, SelectorTy) };
+ llvm::CallSite imp = CGF.EmitCallOrInvoke(MsgLookupFn, args);
+ imp->setMetadata(msgSendMDKind, node);
+ return imp.getInstruction();
+ }
+
+ virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
+ llvm::Value *ObjCSuper,
+ llvm::Value *cmd) {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper,
+ PtrToObjCSuperTy), cmd};
+ return Builder.CreateCall(MsgLookupSuperFn, lookupArgs);
+ }
+
virtual llvm::Value *GetClassNamed(CGBuilderTy &Builder,
const std::string &Name, bool isWeak) {
if (isWeak)
@@ -678,7 +731,13 @@ class CGObjCObjFW : public CGObjCGCC {
}
public:
- CGObjCObjFW(CodeGenModule &Mod): CGObjCGCC(Mod) {}
+ CGObjCObjFW(CodeGenModule &Mod): CGObjCGNU(Mod, 9, 3) {
+ // IMP objc_msg_lookup(id, SEL);
+ MsgLookupFn.init(&CGM, "objc_msg_lookup", IMPTy, IdTy, SelectorTy, NULL);
+ // IMP objc_msg_lookup_super(struct objc_super*, SEL);
+ MsgLookupSuperFn.init(&CGM, "objc_msg_lookup_super", IMPTy,
+ PtrToObjCSuperTy, SelectorTy, NULL);
+ }
};
} // end anonymous namespace
@@ -909,29 +968,30 @@ llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
}
llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
- if (!CGM.getLangOpts().CPlusPlus) {
- if (T->isObjCIdType()
- || T->isObjCQualifiedIdType()) {
- // With the old ABI, there was only one kind of catchall, which broke
- // foreign exceptions. With the new ABI, we use __objc_id_typeinfo as
- // a pointer indicating object catchalls, and NULL to indicate real
- // catchalls
- if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) {
- return MakeConstantString("@id");
- } else {
- return 0;
- }
- }
-
- // All other types should be Objective-C interface pointer types.
- const ObjCObjectPointerType *OPT =
- T->getAs<ObjCObjectPointerType>();
- assert(OPT && "Invalid @catch type.");
- const ObjCInterfaceDecl *IDecl =
- OPT->getObjectType()->getInterface();
- assert(IDecl && "Invalid @catch type.");
- return MakeConstantString(IDecl->getIdentifier()->getName());
+ if (T->isObjCIdType() || T->isObjCQualifiedIdType()) {
+ // With the old ABI, there was only one kind of catchall, which broke
+ // foreign exceptions. With the new ABI, we use __objc_id_typeinfo as
+ // a pointer indicating object catchalls, and NULL to indicate real
+ // catchalls
+ if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) {
+ return MakeConstantString("@id");
+ } else {
+ return 0;
+ }
}
+
+ // All other types should be Objective-C interface pointer types.
+ const ObjCObjectPointerType *OPT = T->getAs<ObjCObjectPointerType>();
+ assert(OPT && "Invalid @catch type.");
+ const ObjCInterfaceDecl *IDecl = OPT->getObjectType()->getInterface();
+ assert(IDecl && "Invalid @catch type.");
+ return MakeConstantString(IDecl->getIdentifier()->getName());
+}
+
+llvm::Constant *CGObjCGNUstep::GetEHType(QualType T) {
+ if (!CGM.getLangOpts().CPlusPlus)
+ return CGObjCGNU::GetEHType(T);
+
// For Objective-C++, we want to provide the ability to catch both C++ and
// Objective-C objects in the same function.
@@ -1436,7 +1496,7 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
Elements.push_back(Zero);
Elements.push_back(llvm::ConstantInt::get(LongTy, info));
if (isMeta) {
- llvm::TargetData td(&TheModule);
+ llvm::DataLayout td(&TheModule);
Elements.push_back(
llvm::ConstantInt::get(LongTy,
td.getTypeSizeInBits(ClassTy) /
@@ -1595,13 +1655,13 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
std::string TypeStr;
Context.getObjCEncodingForMethodDecl(*iter, TypeStr);
if ((*iter)->getImplementationControl() == ObjCMethodDecl::Optional) {
- InstanceMethodNames.push_back(
- MakeConstantString((*iter)->getSelector().getAsString()));
- InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
- } else {
OptionalInstanceMethodNames.push_back(
MakeConstantString((*iter)->getSelector().getAsString()));
OptionalInstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+ } else {
+ InstanceMethodNames.push_back(
+ MakeConstantString((*iter)->getSelector().getAsString()));
+ InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
}
}
// Collect information about class methods:
@@ -1615,13 +1675,13 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
std::string TypeStr;
Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
if ((*iter)->getImplementationControl() == ObjCMethodDecl::Optional) {
- ClassMethodNames.push_back(
- MakeConstantString((*iter)->getSelector().getAsString()));
- ClassMethodTypes.push_back(MakeConstantString(TypeStr));
- } else {
OptionalClassMethodNames.push_back(
MakeConstantString((*iter)->getSelector().getAsString()));
OptionalClassMethodTypes.push_back(MakeConstantString(TypeStr));
+ } else {
+ ClassMethodNames.push_back(
+ MakeConstantString((*iter)->getSelector().getAsString()));
+ ClassMethodTypes.push_back(MakeConstantString(TypeStr));
}
}
@@ -1656,7 +1716,9 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
std::vector<llvm::Constant*> Fields;
ObjCPropertyDecl *property = *iter;
- Fields.push_back(MakeConstantString(property->getNameAsString()));
+
+ Fields.push_back(MakePropertyEncodingString(property, PD));
+
Fields.push_back(llvm::ConstantInt::get(Int8Ty,
property->getPropertyAttributes()));
Fields.push_back(llvm::ConstantInt::get(Int8Ty, 0));
@@ -1909,7 +1971,7 @@ llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OI
bool isSynthesized = (propertyImpl->getPropertyImplementation() ==
ObjCPropertyImplDecl::Synthesize);
- Fields.push_back(MakeConstantString(property->getNameAsString()));
+ Fields.push_back(MakePropertyEncodingString(property, OID));
Fields.push_back(llvm::ConstantInt::get(Int8Ty,
property->getPropertyAttributes()));
Fields.push_back(llvm::ConstantInt::get(Int8Ty, isSynthesized));
@@ -2011,7 +2073,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize().getQuantity();
// For non-fragile ivars, set the instance size to 0 - {the size of just this
// class}. The runtime will then set this to the correct value on load.
- if (CGM.getContext().getLangOpts().ObjCRuntime.isNonFragile()) {
+ if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) {
instanceSize = 0 - (instanceSize - superInstanceSize);
}
@@ -2026,7 +2088,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
// Get the offset
uint64_t BaseOffset = ComputeIvarBaseOffset(CGM, OID, IVD);
uint64_t Offset = BaseOffset;
- if (CGM.getContext().getLangOpts().ObjCRuntime.isNonFragile()) {
+ if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) {
Offset = BaseOffset - superInstanceSize;
}
llvm::Constant *OffsetValue = llvm::ConstantInt::get(IntTy, Offset);
@@ -2334,7 +2396,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// Runtime version, used for ABI compatibility checking.
Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion));
// sizeof(ModuleTy)
- llvm::TargetData td(&TheModule);
+ llvm::DataLayout td(&TheModule);
Elements.push_back(
llvm::ConstantInt::get(LongTy,
td.getTypeSizeInBits(ModuleTy) /
@@ -2488,7 +2550,7 @@ void CGObjCGNU::EmitTryStmt(CodeGenFunction &CGF,
// Unlike the Apple non-fragile runtimes, which also use
// unwind-based zero cost exceptions, the GNU Objective C runtime's
// EH support isn't a veneer over C++ EH. Instead, exception
- // objects are created by __objc_exception_throw and destroyed by
+ // objects are created by objc_exception_throw and destroyed by
// the personality function; this avoids the need for bracketing
// catch handlers with calls to __blah_begin_catch/__blah_end_catch
// (or even _Unwind_DeleteException), but probably doesn't
@@ -2513,7 +2575,9 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
ExceptionAsObject = CGF.ObjCEHValueStack.back();
}
ExceptionAsObject = CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy);
- CGF.EmitCallOrInvoke(ExceptionThrowFn, ExceptionAsObject);
+ llvm::CallSite Throw =
+ CGF.EmitCallOrInvoke(ExceptionThrowFn, ExceptionAsObject);
+ Throw.setDoesNotReturn();
CGF.Builder.CreateUnreachable();
CGF.Builder.ClearInsertionPoint();
}
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index ef802a3..2203f01 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -36,7 +36,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include <cstdio>
using namespace clang;
@@ -66,7 +66,8 @@ private:
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, true),
"objc_msgSend",
- llvm::Attribute::NonLazyBind);
+ llvm::Attributes::get(CGM.getLLVMContext(),
+ llvm::Attributes::NonLazyBind));
}
/// void objc_msgSend_stret (id, SEL, ...)
@@ -433,19 +434,19 @@ public:
/// SyncEnterFn - LLVM objc_sync_enter function.
llvm::Constant *getSyncEnterFn() {
- // void objc_sync_enter (id)
+ // int objc_sync_enter (id)
llvm::Type *args[] = { ObjectPtrTy };
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGM.VoidTy, args, false);
+ llvm::FunctionType::get(CGM.IntTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
}
/// SyncExitFn - LLVM objc_sync_exit function.
llvm::Constant *getSyncExitFn() {
- // void objc_sync_exit (id)
+ // int objc_sync_exit (id)
llvm::Type *args[] = { ObjectPtrTy };
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGM.VoidTy, args, false);
+ llvm::FunctionType::get(CGM.IntTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
}
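
The declared return type changes from void to int here because the runtime's objc_sync_enter/objc_sync_exit actually return a status code (0 on success). A minimal caller-side sketch under that assumption; the wrapper and the use of void* in place of id are illustrative, not part of this patch:

    extern "C" int objc_sync_enter(void *obj);  // returns 0 on success
    extern "C" int objc_sync_exit(void *obj);

    // Run 'body' under the runtime's per-object recursive lock, assuming
    // the documented status-code convention.
    static void withObjCLock(void *obj, void (*body)(void *), void *arg) {
      if (objc_sync_enter(obj) != 0)
        return; // lock could not be acquired
      body(arg);
      objc_sync_exit(obj);
    }
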
@@ -583,7 +584,8 @@ public:
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty,
params, false),
"_setjmp",
- llvm::Attribute::ReturnsTwice);
+ llvm::Attributes::get(CGM.getLLVMContext(),
+ llvm::Attributes::NonLazyBind));
}
public:
@@ -753,6 +755,74 @@ public:
: skip(_skip), scan(_scan) {}
};
+ /// Opcodes for captured block variable layout 'instructions'.
+ /// In the following descriptions, 'I' is the value of the immediate field
+ /// (the field following the opcode).
+ ///
+ enum BLOCK_LAYOUT_OPCODE {
+ /// An operator which affects how the following layout should be
+ /// interpreted.
+ /// I == 0: Halt interpretation and treat everything else as
+ /// a non-pointer. Note that this instruction is equal
+ /// to '\0'.
+ /// I != 0: Currently unused.
+ BLOCK_LAYOUT_OPERATOR = 0,
+
+ /// The next I+1 bytes do not contain a value of object pointer type.
+ /// Note that this can leave the stream unaligned, meaning that
+ /// subsequent word-size instructions do not begin at a multiple of
+ /// the pointer size.
+ BLOCK_LAYOUT_NON_OBJECT_BYTES = 1,
+
+ /// The next I+1 words do not contain a value of object pointer type.
+ /// This is simply an optimized version of BLOCK_LAYOUT_NON_OBJECT_BYTES
+ /// for when the required skip quantity is a multiple of the pointer size.
+ BLOCK_LAYOUT_NON_OBJECT_WORDS = 2,
+
+ /// The next I+1 words are __strong pointers to Objective-C
+ /// objects or blocks.
+ BLOCK_LAYOUT_STRONG = 3,
+
+ /// The next I+1 words are pointers to __block variables.
+ BLOCK_LAYOUT_BYREF = 4,
+
+ /// The next I+1 words are __weak pointers to Objective-C
+ /// objects or blocks.
+ BLOCK_LAYOUT_WEAK = 5,
+
+ /// The next I+1 words are __unsafe_unretained pointers to
+ /// Objective-C objects or blocks.
+ BLOCK_LAYOUT_UNRETAINED = 6
+
+ /// The next I+1 words are block or object pointers with some
+ /// as-yet-unspecified ownership semantics. If we add more
+ /// flavors of ownership semantics, values will be taken from
+ /// this range.
+ ///
+ /// This is included so that older tools can at least continue
+ /// processing the layout past such things.
+ //BLOCK_LAYOUT_OWNERSHIP_UNKNOWN = 7..10,
+
+ /// All other opcodes are reserved. Halt interpretation and
+ /// treat everything else as opaque.
+ };
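
Each layout byte built from these opcodes packs the opcode into the high nibble and the immediate I (the run length minus one) into the low nibble. A minimal standalone sketch of that packing, with hypothetical helper names:

    #include <cassert>
    #include <cstdint>

    // Pack one layout instruction: high nibble = opcode, low nibble = I,
    // where I encodes (count - 1). Runs longer than 16 words must be
    // split into multiple instructions by the emitter.
    static uint8_t packLayoutInst(unsigned opcode, unsigned count) {
      assert(count >= 1 && count <= 16 && "immediate holds count - 1");
      return static_cast<uint8_t>((opcode << 4) | (count - 1));
    }

    int main() {
      // BLOCK_LAYOUT_STRONG == 3; a run of three __strong words is 0x32.
      assert(packLayoutInst(3, 3) == 0x32);
      // BLOCK_LAYOUT_OPERATOR with I == 0 is the 0x00 terminator.
      assert(packLayoutInst(0, 1) == 0x00);
      return 0;
    }
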
+
+ class RUN_SKIP {
+ public:
+ enum BLOCK_LAYOUT_OPCODE opcode;
+ CharUnits block_var_bytepos;
+ CharUnits block_var_size;
+ RUN_SKIP(enum BLOCK_LAYOUT_OPCODE Opcode = BLOCK_LAYOUT_OPERATOR,
+ CharUnits BytePos = CharUnits::Zero(),
+ CharUnits Size = CharUnits::Zero())
+ : opcode(Opcode), block_var_bytepos(BytePos), block_var_size(Size) {}
+
+ // Allow sorting based on byte pos.
+ bool operator<(const RUN_SKIP &b) const {
+ return block_var_bytepos < b.block_var_bytepos;
+ }
+ };
+
protected:
llvm::LLVMContext &VMContext;
// FIXME! May not be needing this after all.
@@ -761,6 +831,9 @@ protected:
// gc ivar layout bitmap calculation helper caches.
SmallVector<GC_IVAR, 16> SkipIvars;
SmallVector<GC_IVAR, 16> IvarsInfo;
+
+ // ARC/MRR layout of captured block-literal variables.
+ SmallVector<RUN_SKIP, 16> RunSkipBlockVars;
/// LazySymbols - Symbols to generate a lazy reference for. See
/// DefinedSymbols and FinishModule().
@@ -869,6 +942,24 @@ protected:
ArrayRef<const FieldDecl*> RecFields,
unsigned int BytePos, bool ForStrongLayout,
bool &HasUnion);
+
+ Qualifiers::ObjCLifetime getBlockCaptureLifetime(QualType QT);
+
+ void UpdateRunSkipBlockVars(bool IsByref,
+ Qualifiers::ObjCLifetime LifeTime,
+ CharUnits FieldOffset,
+ CharUnits FieldSize);
+
+ void BuildRCBlockVarRecordLayout(const RecordType *RT,
+ CharUnits BytePos, bool &HasUnion);
+
+ void BuildRCRecordLayout(const llvm::StructLayout *RecLayout,
+ const RecordDecl *RD,
+ ArrayRef<const FieldDecl*> RecFields,
+ CharUnits BytePos, bool &HasUnion);
+
+ uint64_t InlineLayoutInstruction(SmallVectorImpl<unsigned char> &Layout);
+
/// GetIvarLayoutName - Returns a unique constant for the given
/// ivar layout bitmap.
@@ -959,6 +1050,8 @@ public:
virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD)=0;
virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
const CGBlockInfo &blockInfo);
+ virtual llvm::Constant *BuildRCBlockLayout(CodeGen::CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo);
};
@@ -1787,8 +1880,8 @@ static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) {
llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
const CGBlockInfo &blockInfo) {
+
llvm::Constant *nullPtr = llvm::Constant::getNullValue(CGM.Int8PtrTy);
-
if (CGM.getLangOpts().getGC() == LangOptions::NonGC &&
!CGM.getLangOpts().ObjCAutoRefCount)
return nullPtr;
@@ -1807,7 +1900,7 @@ llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
// Calculate the basic layout of the block structure.
const llvm::StructLayout *layout =
- CGM.getTargetData().getStructLayout(blockInfo.StructureType);
+ CGM.getDataLayout().getStructLayout(blockInfo.StructureType);
// Ignore the optional 'this' capture: C++ objects are not assumed
// to be GC'ed.
@@ -1860,7 +1953,7 @@ llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
llvm::Constant *C = BuildIvarLayoutBitmap(BitMap);
if (CGM.getLangOpts().ObjCGCBitmapPrint) {
printf("\n block variable layout for block: ");
- const unsigned char *s = (unsigned char*)BitMap.c_str();
+ const unsigned char *s = (const unsigned char*)BitMap.c_str();
for (unsigned i = 0, e = BitMap.size(); i < e; i++)
if (!(s[i] & 0xf0))
printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
@@ -1872,6 +1965,476 @@ llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
return C;
}
+/// getBlockCaptureLifetime - This routine returns the lifetime of the captured
+/// block variable for the purpose of block layout metadata generation. FQT is
+/// the type of the variable captured in the block.
+Qualifiers::ObjCLifetime CGObjCCommonMac::getBlockCaptureLifetime(QualType FQT) {
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ return FQT.getObjCLifetime();
+
+ // MRR.
+ if (FQT->isObjCObjectPointerType() || FQT->isBlockPointerType())
+ return Qualifiers::OCL_ExplicitNone;
+
+ return Qualifiers::OCL_None;
+}
+
+void CGObjCCommonMac::UpdateRunSkipBlockVars(bool IsByref,
+ Qualifiers::ObjCLifetime LifeTime,
+ CharUnits FieldOffset,
+ CharUnits FieldSize) {
+ // __block variables are passed by their descriptor address.
+ if (IsByref)
+ RunSkipBlockVars.push_back(RUN_SKIP(BLOCK_LAYOUT_BYREF, FieldOffset,
+ FieldSize));
+ else if (LifeTime == Qualifiers::OCL_Strong)
+ RunSkipBlockVars.push_back(RUN_SKIP(BLOCK_LAYOUT_STRONG, FieldOffset,
+ FieldSize));
+ else if (LifeTime == Qualifiers::OCL_Weak)
+ RunSkipBlockVars.push_back(RUN_SKIP(BLOCK_LAYOUT_WEAK, FieldOffset,
+ FieldSize));
+ else if (LifeTime == Qualifiers::OCL_ExplicitNone)
+ RunSkipBlockVars.push_back(RUN_SKIP(BLOCK_LAYOUT_UNRETAINED, FieldOffset,
+ FieldSize));
+ else
+ RunSkipBlockVars.push_back(RUN_SKIP(BLOCK_LAYOUT_NON_OBJECT_BYTES,
+ FieldOffset,
+ FieldSize));
+}
+
+void CGObjCCommonMac::BuildRCRecordLayout(const llvm::StructLayout *RecLayout,
+ const RecordDecl *RD,
+ ArrayRef<const FieldDecl*> RecFields,
+ CharUnits BytePos, bool &HasUnion) {
+ bool IsUnion = (RD && RD->isUnion());
+ CharUnits MaxUnionSize = CharUnits::Zero();
+ const FieldDecl *MaxField = 0;
+ const FieldDecl *LastFieldBitfieldOrUnnamed = 0;
+ CharUnits MaxFieldOffset = CharUnits::Zero();
+ CharUnits LastBitfieldOrUnnamedOffset = CharUnits::Zero();
+
+ if (RecFields.empty())
+ return;
+ unsigned ByteSizeInBits = CGM.getContext().getTargetInfo().getCharWidth();
+
+ for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
+ const FieldDecl *Field = RecFields[i];
+ // Note that 'i' here is actually the field index of Field inside RD,
+ // although this dependency is hidden.
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ CharUnits FieldOffset =
+ CGM.getContext().toCharUnitsFromBits(RL.getFieldOffset(i));
+
+ // Skip over unnamed fields and bitfields.
+ if (!Field->getIdentifier() || Field->isBitField()) {
+ LastFieldBitfieldOrUnnamed = Field;
+ LastBitfieldOrUnnamedOffset = FieldOffset;
+ continue;
+ }
+
+ LastFieldBitfieldOrUnnamed = 0;
+ QualType FQT = Field->getType();
+ if (FQT->isRecordType() || FQT->isUnionType()) {
+ if (FQT->isUnionType())
+ HasUnion = true;
+
+ BuildRCBlockVarRecordLayout(FQT->getAs<RecordType>(),
+ BytePos + FieldOffset, HasUnion);
+ continue;
+ }
+
+ if (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
+ const ConstantArrayType *CArray =
+ dyn_cast_or_null<ConstantArrayType>(Array);
+ assert(CArray && "only array with known element size is supported");
+ uint64_t ElCount = CArray->getSize().getZExtValue();
+ FQT = CArray->getElementType();
+ while (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
+ const ConstantArrayType *CArray =
+ dyn_cast_or_null<ConstantArrayType>(Array);
+ ElCount *= CArray->getSize().getZExtValue();
+ FQT = CArray->getElementType();
+ }
+
+ assert(!FQT->isUnionType() &&
+ "layout for array of unions not supported");
+ if (FQT->isRecordType() && ElCount) {
+ int OldIndex = RunSkipBlockVars.size() - 1;
+ const RecordType *RT = FQT->getAs<RecordType>();
+ BuildRCBlockVarRecordLayout(RT, BytePos + FieldOffset,
+ HasUnion);
+
+ // Replicate layout information for each array element. Note that
+ // one element is already done.
+ uint64_t ElIx = 1;
+ for (int FirstIndex = RunSkipBlockVars.size() - 1; ElIx < ElCount; ElIx++) {
+ CharUnits Size = CGM.getContext().getTypeSizeInChars(RT);
+ for (int i = OldIndex+1; i <= FirstIndex; ++i)
+ RunSkipBlockVars.push_back(
+ RUN_SKIP(RunSkipBlockVars[i].opcode,
+ RunSkipBlockVars[i].block_var_bytepos + Size*ElIx,
+ RunSkipBlockVars[i].block_var_size));
+ }
+ continue;
+ }
+ }
+ CharUnits FieldSize = CGM.getContext().getTypeSizeInChars(Field->getType());
+ if (IsUnion) {
+ CharUnits UnionIvarSize = FieldSize;
+ if (UnionIvarSize > MaxUnionSize) {
+ MaxUnionSize = UnionIvarSize;
+ MaxField = Field;
+ MaxFieldOffset = FieldOffset;
+ }
+ } else {
+ UpdateRunSkipBlockVars(false,
+ getBlockCaptureLifetime(FQT),
+ BytePos + FieldOffset,
+ FieldSize);
+ }
+ }
+
+ if (LastFieldBitfieldOrUnnamed) {
+ if (LastFieldBitfieldOrUnnamed->isBitField()) {
+ // Last field was a bitfield. Must update the info.
+ uint64_t BitFieldSize
+ = LastFieldBitfieldOrUnnamed->getBitWidthValue(CGM.getContext());
+ unsigned UnsSize = (BitFieldSize / ByteSizeInBits) +
+ ((BitFieldSize % ByteSizeInBits) != 0);
+ CharUnits Size = CharUnits::fromQuantity(UnsSize);
+ Size += LastBitfieldOrUnnamedOffset;
+ UpdateRunSkipBlockVars(false,
+ getBlockCaptureLifetime(LastFieldBitfieldOrUnnamed->getType()),
+ BytePos + LastBitfieldOrUnnamedOffset,
+ Size);
+ } else {
+ assert(!LastFieldBitfieldOrUnnamed->getIdentifier() && "Expected unnamed");
+ // Last field was unnamed. Must update skip info.
+ CharUnits FieldSize
+ = CGM.getContext().getTypeSizeInChars(LastFieldBitfieldOrUnnamed->getType());
+ UpdateRunSkipBlockVars(false,
+ getBlockCaptureLifetime(LastFieldBitfieldOrUnnamed->getType()),
+ BytePos + LastBitfieldOrUnnamedOffset,
+ FieldSize);
+ }
+ }
+
+ if (MaxField)
+ UpdateRunSkipBlockVars(false,
+ getBlockCaptureLifetime(MaxField->getType()),
+ BytePos + MaxFieldOffset,
+ MaxUnionSize);
+}
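
The array branch above computes the layout for element 0 and then replicates those entries for the remaining elements, shifting each copy by k * sizeof(element). A small self-contained sketch of that replication step (the Run struct and its values are illustrative):

    #include <cstdio>
    #include <vector>

    struct Run { int opcode; unsigned bytePos, byteSize; };

    int main() {
      // Layout entries computed for element 0 of a 16-byte element type:
      // a __strong pointer at +0 and 8 bytes of plain data at +8.
      std::vector<Run> runs = {{3, 0, 8}, {1, 8, 8}};
      const unsigned elemSize = 16, elemCount = 3;
      const size_t firstEnd = runs.size();
      // Replicate for elements 1..N-1; element 0 is already done.
      for (unsigned k = 1; k < elemCount; ++k)
        for (size_t i = 0; i < firstEnd; ++i)
          runs.push_back({runs[i].opcode,
                          runs[i].bytePos + k * elemSize,
                          runs[i].byteSize});
      for (const Run &r : runs)
        std::printf("op=%d pos=%u size=%u\n", r.opcode, r.bytePos, r.byteSize);
      return 0;
    }
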
+
+void CGObjCCommonMac::BuildRCBlockVarRecordLayout(const RecordType *RT,
+ CharUnits BytePos,
+ bool &HasUnion) {
+ const RecordDecl *RD = RT->getDecl();
+ SmallVector<const FieldDecl*, 16> Fields;
+ for (RecordDecl::field_iterator i = RD->field_begin(),
+ e = RD->field_end(); i != e; ++i)
+ Fields.push_back(*i);
+ llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
+ const llvm::StructLayout *RecLayout =
+ CGM.getDataLayout().getStructLayout(cast<llvm::StructType>(Ty));
+
+ BuildRCRecordLayout(RecLayout, RD, Fields, BytePos, HasUnion);
+}
+
+/// InlineLayoutInstruction - This routine produces an inline instruction for
+/// the block variable layout if it can. If not, it returns 0. The rules are as
+/// follows: if ((uintptr_t) layout) < (1 << 12), the layout is inline. In the
+/// 64-bit world, an inline layout of value 0x0000000000000xyz is interpreted
+/// as follows:
+/// x captured object pointers of BLOCK_LAYOUT_STRONG. Followed by
+/// y captured objects of BLOCK_LAYOUT_BYREF. Followed by
+/// z captured objects of BLOCK_LAYOUT_WEAK. If any of the above is missing,
+/// zero replaces it. For example, 0x00000x00 means x BLOCK_LAYOUT_STRONG
+/// objects and no BLOCK_LAYOUT_BYREF and no BLOCK_LAYOUT_WEAK objects are
+/// captured.
+uint64_t CGObjCCommonMac::InlineLayoutInstruction(
+ SmallVectorImpl<unsigned char> &Layout) {
+ uint64_t Result = 0;
+ if (Layout.size() <= 3) {
+ unsigned size = Layout.size();
+ unsigned strong_word_count = 0, byref_word_count = 0, weak_word_count = 0;
+ unsigned char inst;
+ enum BLOCK_LAYOUT_OPCODE opcode;
+ switch (size) {
+ case 3:
+ inst = Layout[0];
+ opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_STRONG)
+ strong_word_count = (inst & 0xF)+1;
+ else
+ return 0;
+ inst = Layout[1];
+ opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_BYREF)
+ byref_word_count = (inst & 0xF)+1;
+ else
+ return 0;
+ inst = Layout[2];
+ opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_WEAK)
+ weak_word_count = (inst & 0xF)+1;
+ else
+ return 0;
+ break;
+
+ case 2:
+ inst = Layout[0];
+ opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_STRONG) {
+ strong_word_count = (inst & 0xF)+1;
+ inst = Layout[1];
+ opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_BYREF)
+ byref_word_count = (inst & 0xF)+1;
+ else if (opcode == BLOCK_LAYOUT_WEAK)
+ weak_word_count = (inst & 0xF)+1;
+ else
+ return 0;
+ }
+ else if (opcode == BLOCK_LAYOUT_BYREF) {
+ byref_word_count = (inst & 0xF)+1;
+ inst = Layout[1];
+ opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_WEAK)
+ weak_word_count = (inst & 0xF)+1;
+ else
+ return 0;
+ }
+ else
+ return 0;
+ break;
+
+ case 1:
+ inst = Layout[0];
+ opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_STRONG)
+ strong_word_count = (inst & 0xF)+1;
+ else if (opcode == BLOCK_LAYOUT_BYREF)
+ byref_word_count = (inst & 0xF)+1;
+ else if (opcode == BLOCK_LAYOUT_WEAK)
+ weak_word_count = (inst & 0xF)+1;
+ else
+ return 0;
+ break;
+
+ default:
+ return 0;
+ }
+
+ // Cannot inline when any of the word counts is 16 (i.e. an immediate field
+ // of 15), because the inline format stores the actual word count in a
+ // nibble and can therefore only represent counts of 0 through 15.
+ if (strong_word_count == 16 || byref_word_count == 16 || weak_word_count == 16)
+ return 0;
+
+ unsigned count =
+ (strong_word_count != 0) + (byref_word_count != 0) + (weak_word_count != 0);
+
+ if (size == count) {
+ if (strong_word_count)
+ Result = strong_word_count;
+ Result <<= 4;
+ if (byref_word_count)
+ Result += byref_word_count;
+ Result <<= 4;
+ if (weak_word_count)
+ Result += weak_word_count;
+ }
+ }
+ return Result;
+}
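
As a worked example of the rules above: the three-byte layout {0x32, 0x41, 0x50} (three __strong words, two __block words, one __weak word) collapses to the inline value 0x321. A sketch decoding such a value, with hypothetical names; note the inline nibbles store counts directly, not count-minus-one:

    #include <cassert>
    #include <cstdint>

    // Split an inline layout word into its strong/byref/weak nibbles.
    static void decodeInlineLayout(uint64_t v, unsigned &strongWords,
                                   unsigned &byrefWords, unsigned &weakWords) {
      strongWords = (v >> 8) & 0xF; // x nibble
      byrefWords  = (v >> 4) & 0xF; // y nibble
      weakWords   =  v       & 0xF; // z nibble
    }

    int main() {
      unsigned s, b, w;
      decodeInlineLayout(0x321, s, b, w);
      assert(s == 3 && b == 2 && w == 1);
      return 0;
    }
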
+
+llvm::Constant *CGObjCCommonMac::BuildRCBlockLayout(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ assert(CGM.getLangOpts().getGC() == LangOptions::NonGC);
+
+ llvm::Constant *nullPtr = llvm::Constant::getNullValue(CGM.Int8PtrTy);
+
+ RunSkipBlockVars.clear();
+ bool hasUnion = false;
+
+ unsigned WordSizeInBits = CGM.getContext().getTargetInfo().getPointerWidth(0);
+ unsigned ByteSizeInBits = CGM.getContext().getTargetInfo().getCharWidth();
+ unsigned WordSizeInBytes = WordSizeInBits/ByteSizeInBits;
+
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ // Calculate the basic layout of the block structure.
+ const llvm::StructLayout *layout =
+ CGM.getDataLayout().getStructLayout(blockInfo.StructureType);
+
+ // Ignore the optional 'this' capture: C++ objects are not assumed
+ // to be GC'ed.
+
+ // Walk the captured variables.
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ QualType type = variable->getType();
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+
+ // Ignore constant captures.
+ if (capture.isConstant()) continue;
+
+ CharUnits fieldOffset =
+ CharUnits::fromQuantity(layout->getElementOffset(capture.getIndex()));
+
+ assert(!type->isArrayType() && "array variable should not be caught");
+ if (const RecordType *record = type->getAs<RecordType>()) {
+ BuildRCBlockVarRecordLayout(record, fieldOffset, hasUnion);
+ continue;
+ }
+ CharUnits fieldSize;
+ if (ci->isByRef())
+ fieldSize = CharUnits::fromQuantity(WordSizeInBytes);
+ else
+ fieldSize = CGM.getContext().getTypeSizeInChars(type);
+ UpdateRunSkipBlockVars(ci->isByRef(), getBlockCaptureLifetime(type),
+ fieldOffset, fieldSize);
+ }
+
+ if (RunSkipBlockVars.empty())
+ return nullPtr;
+
+ // Sort on byte position; captures might not be allocated in order,
+ // and unions can do funny things.
+ llvm::array_pod_sort(RunSkipBlockVars.begin(), RunSkipBlockVars.end());
+ SmallVector<unsigned char, 16> Layout;
+
+ unsigned size = RunSkipBlockVars.size();
+ for (unsigned i = 0; i < size; i++) {
+ enum BLOCK_LAYOUT_OPCODE opcode = RunSkipBlockVars[i].opcode;
+ CharUnits start_byte_pos = RunSkipBlockVars[i].block_var_bytepos;
+ CharUnits end_byte_pos = start_byte_pos;
+ unsigned j = i+1;
+ while (j < size) {
+ if (opcode == RunSkipBlockVars[j].opcode) {
+ end_byte_pos = RunSkipBlockVars[j++].block_var_bytepos;
+ i++;
+ }
+ else
+ break;
+ }
+ CharUnits size_in_bytes =
+ end_byte_pos - start_byte_pos + RunSkipBlockVars[j-1].block_var_size;
+ if (j < size) {
+ CharUnits gap =
+ RunSkipBlockVars[j].block_var_bytepos -
+ RunSkipBlockVars[j-1].block_var_bytepos - RunSkipBlockVars[j-1].block_var_size;
+ size_in_bytes += gap;
+ }
+ CharUnits residue_in_bytes = CharUnits::Zero();
+ if (opcode == BLOCK_LAYOUT_NON_OBJECT_BYTES) {
+ residue_in_bytes = size_in_bytes % WordSizeInBytes;
+ size_in_bytes -= residue_in_bytes;
+ opcode = BLOCK_LAYOUT_NON_OBJECT_WORDS;
+ }
+
+ unsigned size_in_words = size_in_bytes.getQuantity() / WordSizeInBytes;
+ while (size_in_words >= 16) {
+ // Note that the value in the imm. field is one less than the actual
+ // value. So, 0xf means 16 words follow!
+ unsigned char inst = (opcode << 4) | 0xf;
+ Layout.push_back(inst);
+ size_in_words -= 16;
+ }
+ if (size_in_words > 0) {
+ // Note that the value in the imm. field is one less than the actual
+ // value, so we subtract 1 here.
+ unsigned char inst = (opcode << 4) | (size_in_words-1);
+ Layout.push_back(inst);
+ }
+ if (residue_in_bytes > CharUnits::Zero()) {
+ unsigned char inst =
+ (BLOCK_LAYOUT_NON_OBJECT_BYTES << 4) | (residue_in_bytes.getQuantity()-1);
+ Layout.push_back(inst);
+ }
+ }
+
+ int e = Layout.size()-1;
+ while (e >= 0) {
+ unsigned char inst = Layout[e--];
+ enum BLOCK_LAYOUT_OPCODE opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ if (opcode == BLOCK_LAYOUT_NON_OBJECT_BYTES || opcode == BLOCK_LAYOUT_NON_OBJECT_WORDS)
+ Layout.pop_back();
+ else
+ break;
+ }
+
+ uint64_t Result = InlineLayoutInstruction(Layout);
+ if (Result != 0) {
+ // Block variable layout instruction has been inlined.
+ if (CGM.getLangOpts().ObjCGCBitmapPrint) {
+ printf("\n Inline instruction for block variable layout: ");
+ printf("0x0%llx\n", (unsigned long long)Result);
+ }
+ if (WordSizeInBytes == 8) {
+ const llvm::APInt Instruction(64, Result);
+ return llvm::Constant::getIntegerValue(CGM.Int64Ty, Instruction);
+ }
+ else {
+ const llvm::APInt Instruction(32, Result);
+ return llvm::Constant::getIntegerValue(CGM.Int32Ty, Instruction);
+ }
+ }
+
+ unsigned char inst = (BLOCK_LAYOUT_OPERATOR << 4) | 0;
+ Layout.push_back(inst);
+ std::string BitMap;
+ for (unsigned i = 0, e = Layout.size(); i != e; i++)
+ BitMap += Layout[i];
+
+ if (CGM.getLangOpts().ObjCGCBitmapPrint) {
+ printf("\n block variable layout: ");
+ for (unsigned i = 0, e = BitMap.size(); i != e; i++) {
+ unsigned char inst = BitMap[i];
+ enum BLOCK_LAYOUT_OPCODE opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
+ unsigned delta = 1;
+ switch (opcode) {
+ case BLOCK_LAYOUT_OPERATOR:
+ printf("BL_OPERATOR:");
+ delta = 0;
+ break;
+ case BLOCK_LAYOUT_NON_OBJECT_BYTES:
+ printf("BL_NON_OBJECT_BYTES:");
+ break;
+ case BLOCK_LAYOUT_NON_OBJECT_WORDS:
+ printf("BL_NON_OBJECT_WORD:");
+ break;
+ case BLOCK_LAYOUT_STRONG:
+ printf("BL_STRONG:");
+ break;
+ case BLOCK_LAYOUT_BYREF:
+ printf("BL_BYREF:");
+ break;
+ case BLOCK_LAYOUT_WEAK:
+ printf("BL_WEAK:");
+ break;
+ case BLOCK_LAYOUT_UNRETAINED:
+ printf("BL_UNRETAINED:");
+ break;
+ }
+ // The actual word count is one more than what is in the imm.
+ // field of the instruction.
+ printf("%d", (inst & 0xf) + delta);
+ if (i < e-1)
+ printf(", ");
+ else
+ printf("\n");
+ }
+ }
+
+ llvm::GlobalVariable * Entry =
+ CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
+ llvm::ConstantDataArray::getString(VMContext, BitMap, false),
+ "__TEXT,__objc_classname,cstring_literals", 1, true);
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
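
Tying the emission loop together: a block capturing two adjacent __strong pointers, then three words of plain data, then one __weak pointer (on a 64-bit target) produces the runs {strong x2, non-object words x3, weak x1}, i.e. the bytes 0x31 0x22 0x50 followed by the 0x00 terminator; it cannot be inlined because the middle run is not BYREF. A compact sketch of that pass under those assumptions, with illustrative names:

    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    enum Opcode { OPERATOR = 0, NONOBJ_WORDS = 2, STRONG = 3, WEAK = 5 };

    // Emit one instruction byte per run, splitting runs of more than 16
    // words, then append the terminating BLOCK_LAYOUT_OPERATOR byte.
    static std::vector<uint8_t>
    emitRuns(std::vector<std::pair<Opcode, unsigned>> runs) {
      std::vector<uint8_t> out;
      for (auto &r : runs) {
        unsigned words = r.second;
        while (words >= 16) { out.push_back((r.first << 4) | 0xF); words -= 16; }
        if (words > 0) out.push_back((r.first << 4) | (words - 1));
      }
      out.push_back(OPERATOR << 4); // 0x00 terminator
      return out;
    }

    int main() {
      for (uint8_t b : emitRuns({{STRONG, 2}, {NONOBJ_WORDS, 3}, {WEAK, 1}}))
        std::printf("0x%02x ", unsigned(b)); // 0x31 0x22 0x50 0x00
      std::printf("\n");
      return 0;
    }
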
+
llvm::Value *CGObjCMac::GenerateProtocolRef(CGBuilderTy &Builder,
const ObjCProtocolDecl *PD) {
// FIXME: I don't understand why gcc generates this, or where it is
@@ -2040,7 +2603,7 @@ CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
ArrayRef<llvm::Constant*> OptClassMethods,
ArrayRef<llvm::Constant*> MethodTypesExt) {
uint64_t Size =
- CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy);
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy);
llvm::Constant *Values[] = {
llvm::ConstantInt::get(ObjCTypes.IntTy, Size),
EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_OPT_"
@@ -2180,7 +2743,7 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
return llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
unsigned PropertySize =
- CGM.getTargetData().getTypeAllocSize(ObjCTypes.PropertyTy);
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.PropertyTy);
llvm::Constant *Values[3];
Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, PropertySize);
Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Properties.size());
@@ -2269,7 +2832,7 @@ CGObjCMac::EmitMethodDescList(Twine Name, const char *Section,
};
*/
void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
- unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.CategoryTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.CategoryTy);
// FIXME: This is poor design, the OCD should have a pointer to the category
// decl. Additionally, note that Category can be null for the @implementation
@@ -2338,15 +2901,37 @@ void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
MethodDefinitions.clear();
}
-// FIXME: Get from somewhere?
-enum ClassFlags {
- eClassFlags_Factory = 0x00001,
- eClassFlags_Meta = 0x00002,
- // <rdr://5142207>
- eClassFlags_HasCXXStructors = 0x02000,
- eClassFlags_Hidden = 0x20000,
- eClassFlags_ABI2_Hidden = 0x00010,
- eClassFlags_ABI2_HasCXXStructors = 0x00004 // <rdr://4923634>
+enum FragileClassFlags {
+ FragileABI_Class_Factory = 0x00001,
+ FragileABI_Class_Meta = 0x00002,
+ FragileABI_Class_HasCXXStructors = 0x02000,
+ FragileABI_Class_Hidden = 0x20000
+};
+
+enum NonFragileClassFlags {
+ /// Is a meta-class.
+ NonFragileABI_Class_Meta = 0x00001,
+
+ /// Is a root class.
+ NonFragileABI_Class_Root = 0x00002,
+
+ /// Has a C++ constructor and destructor.
+ NonFragileABI_Class_HasCXXStructors = 0x00004,
+
+ /// Has hidden visibility.
+ NonFragileABI_Class_Hidden = 0x00010,
+
+ /// Has the exception attribute.
+ NonFragileABI_Class_Exception = 0x00020,
+
+ /// (Obsolete) ARC-specific: this class has a .release_ivars method
+ NonFragileABI_Class_HasIvarReleaser = 0x00040,
+
+ /// Class implementation was compiled under ARC.
+ NonFragileABI_Class_CompiledByARC = 0x00080,
+
+ /// Class has non-trivial destructors, but zero-initialization is okay.
+ NonFragileABI_Class_HasCXXDestructorOnly = 0x00100
};
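
A worked combination of these flags, assuming the enumerator values above: a hidden root class whose only C++ work is destruction gets Root | Hidden | HasCXXStructors | HasCXXDestructorOnly = 0x116:

    #include <cassert>

    enum : unsigned {
      Root = 0x00002,
      HasCXXStructors = 0x00004,
      Hidden = 0x00010,
      HasCXXDestructorOnly = 0x00100,
    };

    int main() {
      unsigned flags = Root | Hidden | HasCXXStructors | HasCXXDestructorOnly;
      assert(flags == 0x116);
      return 0;
    }
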
/*
@@ -2379,15 +2964,15 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
EmitProtocolList("\01L_OBJC_CLASS_PROTOCOLS_" + ID->getName(),
Interface->all_referenced_protocol_begin(),
Interface->all_referenced_protocol_end());
- unsigned Flags = eClassFlags_Factory;
- if (ID->hasCXXStructors())
- Flags |= eClassFlags_HasCXXStructors;
+ unsigned Flags = FragileABI_Class_Factory;
+ if (ID->hasNonZeroConstructors() || ID->hasDestructors())
+ Flags |= FragileABI_Class_HasCXXStructors;
unsigned Size =
CGM.getContext().getASTObjCImplementationLayout(ID).getSize().getQuantity();
// FIXME: Set CXX-structors flag.
if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
- Flags |= eClassFlags_Hidden;
+ Flags |= FragileABI_Class_Hidden;
llvm::SmallVector<llvm::Constant*, 16> InstanceMethods, ClassMethods;
for (ObjCImplementationDecl::instmeth_iterator
@@ -2470,11 +3055,11 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
llvm::Constant *Protocols,
ArrayRef<llvm::Constant*> Methods) {
- unsigned Flags = eClassFlags_Meta;
- unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassTy);
+ unsigned Flags = FragileABI_Class_Meta;
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ClassTy);
if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
- Flags |= eClassFlags_Hidden;
+ Flags |= FragileABI_Class_Hidden;
llvm::Constant *Values[12];
// The isa for the metaclass is the root of the hierarchy.
@@ -2588,7 +3173,7 @@ llvm::Value *CGObjCMac::EmitSuperClassRef(const ObjCInterfaceDecl *ID) {
llvm::Constant *
CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID) {
uint64_t Size =
- CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassExtensionTy);
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ClassExtensionTy);
llvm::Constant *Values[3];
Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
@@ -3481,7 +4066,7 @@ void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
- unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
: CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
@@ -3502,7 +4087,7 @@ void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
bool threadlocal) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
- unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
: CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
@@ -3528,7 +4113,7 @@ void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
assert(ivarOffset && "EmitObjCIvarAssign - ivarOffset is NULL");
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
- unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
: CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
@@ -3548,7 +4133,7 @@ void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
- unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
: CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
@@ -3679,7 +4264,7 @@ void CGObjCCommonMac::EmitImageInfo() {
static const int ModuleVersion = 7;
void CGObjCMac::EmitModuleInfo() {
- uint64_t Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ModuleTy);
+ uint64_t Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ModuleTy);
llvm::Constant *Values[] = {
llvm::ConstantInt::get(ObjCTypes.LongTy, ModuleVersion),
@@ -3824,7 +4409,7 @@ void CGObjCCommonMac::BuildAggrIvarRecordLayout(const RecordType *RT,
Fields.push_back(*i);
llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
const llvm::StructLayout *RecLayout =
- CGM.getTargetData().getStructLayout(cast<llvm::StructType>(Ty));
+ CGM.getDataLayout().getStructLayout(cast<llvm::StructType>(Ty));
BuildAggrIvarLayout(0, RecLayout, RD, Fields, BytePos,
ForStrongLayout, HasUnion);
@@ -3951,7 +4536,7 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
if (IsUnion) {
// FIXME: Why the asymmetry? We divide by word size in bits on other
// side.
- uint64_t UnionIvarSize = FieldSize;
+ uint64_t UnionIvarSize = FieldSize / ByteSizeInBits;
if (UnionIvarSize > MaxSkippedUnionIvarSize) {
MaxSkippedUnionIvarSize = UnionIvarSize;
MaxSkippedField = Field;
@@ -4005,7 +4590,7 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayoutBitmap(std::string &BitMap) {
// Build the string of skip/scan nibbles
SmallVector<SKIP_SCAN, 32> SkipScanIvars;
unsigned int WordSize =
- CGM.getTypes().getTargetData().getTypeAllocSize(PtrTy);
+ CGM.getTypes().getDataLayout().getTypeAllocSize(PtrTy);
if (IvarsInfo[0].ivar_bytepos == 0) {
WordsToSkip = 0;
WordsToScan = IvarsInfo[0].ivar_size;
@@ -4187,7 +4772,7 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
printf("\n%s ivar layout for class '%s': ",
ForStrongLayout ? "strong" : "weak",
OMD->getClassInterface()->getName().data());
- const unsigned char *s = (unsigned char*)BitMap.c_str();
+ const unsigned char *s = (const unsigned char*)BitMap.c_str();
for (unsigned i = 0, e = BitMap.size(); i < e; i++)
if (!(s[i] & 0xf0))
printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
@@ -4835,7 +5420,7 @@ AddModuleClassList(ArrayRef<llvm::GlobalValue*> Container,
llvm::GlobalValue::InternalLinkage,
Init,
SymbolName);
- GV->setAlignment(CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ GV->setAlignment(CGM.getDataLayout().getABITypeAlignment(Init->getType()));
GV->setSection(SectionName);
CGM.AddUsedGlobal(GV);
}
@@ -4941,19 +5526,6 @@ bool CGObjCNonFragileABIMac::isVTableDispatchedSelector(Selector Sel) {
return VTableDispatchMethods.count(Sel);
}
-// Metadata flags
-enum MetaDataDlags {
- CLS = 0x0,
- CLS_META = 0x1,
- CLS_ROOT = 0x2,
- OBJC2_CLS_HIDDEN = 0x10,
- CLS_EXCEPTION = 0x20,
-
- /// (Obsolete) ARC-specific: this class has a .release_ivars method
- CLS_HAS_IVAR_RELEASER = 0x40,
- /// class was compiled with -fobjc-arr
- CLS_COMPILED_BY_ARC = 0x80 // (1<<7)
-};
/// BuildClassRoTInitializer - generate meta-data for:
/// struct _class_ro_t {
/// uint32_t const flags;
@@ -4978,19 +5550,20 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
llvm::Constant *Values[10]; // 11 for 64bit targets!
if (CGM.getLangOpts().ObjCAutoRefCount)
- flags |= CLS_COMPILED_BY_ARC;
+ flags |= NonFragileABI_Class_CompiledByARC;
Values[ 0] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
Values[ 1] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceStart);
Values[ 2] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceSize);
// FIXME. For 64bit targets add 0 here.
- Values[ 3] = (flags & CLS_META) ? GetIvarLayoutName(0, ObjCTypes)
+ Values[ 3] = (flags & NonFragileABI_Class_Meta)
+ ? GetIvarLayoutName(0, ObjCTypes)
: BuildIvarLayout(ID, true);
Values[ 4] = GetClassName(ID->getIdentifier());
// const struct _method_list_t * const baseMethods;
std::vector<llvm::Constant*> Methods;
std::string MethodListName("\01l_OBJC_$_");
- if (flags & CLS_META) {
+ if (flags & NonFragileABI_Class_Meta) {
MethodListName += "CLASS_METHODS_" + ID->getNameAsString();
for (ObjCImplementationDecl::classmeth_iterator
i = ID->classmeth_begin(), e = ID->classmeth_end(); i != e; ++i) {
@@ -5030,28 +5603,27 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
OID->all_referenced_protocol_begin(),
OID->all_referenced_protocol_end());
- if (flags & CLS_META)
+ if (flags & NonFragileABI_Class_Meta) {
Values[ 7] = llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
- else
- Values[ 7] = EmitIvarList(ID);
- Values[ 8] = (flags & CLS_META) ? GetIvarLayoutName(0, ObjCTypes)
- : BuildIvarLayout(ID, false);
- if (flags & CLS_META)
+ Values[ 8] = GetIvarLayoutName(0, ObjCTypes);
Values[ 9] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
- else
+ } else {
+ Values[ 7] = EmitIvarList(ID);
+ Values[ 8] = BuildIvarLayout(ID, false);
Values[ 9] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getName(),
ID, ID->getClassInterface(), ObjCTypes);
+ }
llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassRonfABITy,
Values);
llvm::GlobalVariable *CLASS_RO_GV =
new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassRonfABITy, false,
llvm::GlobalValue::InternalLinkage,
Init,
- (flags & CLS_META) ?
+ (flags & NonFragileABI_Class_Meta) ?
std::string("\01l_OBJC_METACLASS_RO_$_")+ClassName :
std::string("\01l_OBJC_CLASS_RO_$_")+ClassName);
CLASS_RO_GV->setAlignment(
- CGM.getTargetData().getABITypeAlignment(ObjCTypes.ClassRonfABITy));
+ CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassRonfABITy));
CLASS_RO_GV->setSection("__DATA, __objc_const");
return CLASS_RO_GV;
@@ -5088,7 +5660,7 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassMetaData(
GV->setInitializer(Init);
GV->setSection("__DATA, __objc_data");
GV->setAlignment(
- CGM.getTargetData().getABITypeAlignment(ObjCTypes.ClassnfABITy));
+ CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassnfABITy));
if (HiddenVisibility)
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
return GV;
@@ -5138,23 +5710,31 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
"CGObjCNonFragileABIMac::GenerateClass - class is 0");
// FIXME: Is this correct (that meta class size is never computed)?
uint32_t InstanceStart =
- CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassnfABITy);
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ClassnfABITy);
uint32_t InstanceSize = InstanceStart;
- uint32_t flags = CLS_META;
+ uint32_t flags = NonFragileABI_Class_Meta;
std::string ObjCMetaClassName(getMetaclassSymbolPrefix());
std::string ObjCClassName(getClassSymbolPrefix());
llvm::GlobalVariable *SuperClassGV, *IsAGV;
+ // Build the flags for the metaclass.
bool classIsHidden =
ID->getClassInterface()->getVisibility() == HiddenVisibility;
if (classIsHidden)
- flags |= OBJC2_CLS_HIDDEN;
- if (ID->hasCXXStructors())
- flags |= eClassFlags_ABI2_HasCXXStructors;
+ flags |= NonFragileABI_Class_Hidden;
+
+ // FIXME: why is this flag set on the metaclass?
+ // ObjC metaclasses have no fields and don't really get constructed.
+ if (ID->hasNonZeroConstructors() || ID->hasDestructors()) {
+ flags |= NonFragileABI_Class_HasCXXStructors;
+ if (!ID->hasNonZeroConstructors())
+ flags |= NonFragileABI_Class_HasCXXDestructorOnly;
+ }
+
if (!ID->getClassInterface()->getSuperClass()) {
// class is root
- flags |= CLS_ROOT;
+ flags |= NonFragileABI_Class_Root;
SuperClassGV = GetClassGlobal(ObjCClassName + ClassName);
IsAGV = GetClassGlobal(ObjCMetaClassName + ClassName);
} else {
@@ -5183,17 +5763,28 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
DefinedMetaClasses.push_back(MetaTClass);
// Metadata for the class
- flags = CLS;
+ flags = 0;
if (classIsHidden)
- flags |= OBJC2_CLS_HIDDEN;
- if (ID->hasCXXStructors())
- flags |= eClassFlags_ABI2_HasCXXStructors;
+ flags |= NonFragileABI_Class_Hidden;
+
+ if (ID->hasNonZeroConstructors() || ID->hasDestructors()) {
+ flags |= NonFragileABI_Class_HasCXXStructors;
+
+ // Set a flag to enable a runtime optimization when a class has
+ // fields that require destruction but which don't require
+ // anything except zero-initialization during construction. This
+ // is most notably true of __strong and __weak types, but you can
+ // also imagine there being C++ types with non-trivial default
+ // constructors that merely set all fields to null.
+ if (!ID->hasNonZeroConstructors())
+ flags |= NonFragileABI_Class_HasCXXDestructorOnly;
+ }
if (hasObjCExceptionAttribute(CGM.getContext(), ID->getClassInterface()))
- flags |= CLS_EXCEPTION;
+ flags |= NonFragileABI_Class_Exception;
if (!ID->getClassInterface()->getSuperClass()) {
- flags |= CLS_ROOT;
+ flags |= NonFragileABI_Class_Root;
SuperClassGV = 0;
} else {
// Has a root. Current class is not a root.
@@ -5220,7 +5811,7 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
DefinedNonLazyClasses.push_back(ClassMD);
// Force the definition of the EHType if necessary.
- if (flags & CLS_EXCEPTION)
+ if (flags & NonFragileABI_Class_Exception)
GetInterfaceEHType(ID->getClassInterface(), true);
// Make sure method definition entries are all clear for next implementation.
MethodDefinitions.clear();
@@ -5344,7 +5935,7 @@ void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
Init,
ExtCatName);
GCATV->setAlignment(
- CGM.getTargetData().getABITypeAlignment(ObjCTypes.CategorynfABITy));
+ CGM.getDataLayout().getABITypeAlignment(ObjCTypes.CategorynfABITy));
GCATV->setSection("__DATA, __objc_const");
CGM.AddUsedGlobal(GCATV);
DefinedCategories.push_back(GCATV);
@@ -5391,7 +5982,7 @@ CGObjCNonFragileABIMac::EmitMethodList(Twine Name,
llvm::Constant *Values[3];
// sizeof(struct _objc_method)
- unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.MethodTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.MethodTy);
Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
// method_count
Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
@@ -5403,7 +5994,7 @@ CGObjCNonFragileABIMac::EmitMethodList(Twine Name,
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
llvm::GlobalValue::InternalLinkage, Init, Name);
- GV->setAlignment(CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ GV->setAlignment(CGM.getDataLayout().getABITypeAlignment(Init->getType()));
GV->setSection(Section);
CGM.AddUsedGlobal(GV);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListnfABIPtrTy);
@@ -5437,7 +6028,7 @@ CGObjCNonFragileABIMac::EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
IvarOffsetGV->setInitializer(llvm::ConstantInt::get(ObjCTypes.LongTy,
Offset));
IvarOffsetGV->setAlignment(
- CGM.getTargetData().getABITypeAlignment(ObjCTypes.LongTy));
+ CGM.getDataLayout().getABITypeAlignment(ObjCTypes.LongTy));
// FIXME: This matches gcc, but shouldn't the visibility be set on the use as
// well (i.e., in ObjCIvarOffsetVariable).
@@ -5490,7 +6081,7 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
Ivar[2] = GetMethodVarType(IVD);
llvm::Type *FieldTy =
CGM.getTypes().ConvertTypeForMem(IVD->getType());
- unsigned Size = CGM.getTargetData().getTypeAllocSize(FieldTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(FieldTy);
unsigned Align = CGM.getContext().getPreferredTypeAlign(
IVD->getType().getTypePtr()) >> 3;
Align = llvm::Log2_32(Align);
@@ -5508,7 +6099,7 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
return llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
llvm::Constant *Values[3];
- unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.IvarnfABITy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.IvarnfABITy);
Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarnfABITy,
@@ -5522,7 +6113,7 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
Init,
Prefix + OID->getName());
GV->setAlignment(
- CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ CGM.getDataLayout().getABITypeAlignment(Init->getType()));
GV->setSection("__DATA, __objc_const");
CGM.AddUsedGlobal(GV);
@@ -5644,7 +6235,7 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
Values[7] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + PD->getName(),
0, PD, ObjCTypes);
uint32_t Size =
- CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolnfABITy);
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ProtocolnfABITy);
Values[8] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
Values[9] = llvm::Constant::getNullValue(ObjCTypes.IntTy);
Values[10] = EmitProtocolMethodTypes("\01l_OBJC_$_PROTOCOL_METHOD_TYPES_"
@@ -5663,7 +6254,7 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
false, llvm::GlobalValue::WeakAnyLinkage, Init,
"\01l_OBJC_PROTOCOL_$_" + PD->getName());
Entry->setAlignment(
- CGM.getTargetData().getABITypeAlignment(ObjCTypes.ProtocolnfABITy));
+ CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ProtocolnfABITy));
Entry->setSection("__DATA,__datacoal_nt,coalesced");
Protocols[PD->getIdentifier()] = Entry;
@@ -5678,7 +6269,7 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
false, llvm::GlobalValue::WeakAnyLinkage, Entry,
"\01l_OBJC_LABEL_PROTOCOL_$_" + PD->getName());
PTGV->setAlignment(
- CGM.getTargetData().getABITypeAlignment(ObjCTypes.ProtocolnfABIPtrTy));
+ CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ProtocolnfABIPtrTy));
PTGV->setSection("__DATA, __objc_protolist, coalesced, no_dead_strip");
PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
CGM.AddUsedGlobal(PTGV);
@@ -5732,7 +6323,7 @@ CGObjCNonFragileABIMac::EmitProtocolList(Twine Name,
Init, Name);
GV->setSection("__DATA, __objc_const");
GV->setAlignment(
- CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ CGM.getDataLayout().getABITypeAlignment(Init->getType()));
CGM.AddUsedGlobal(GV);
return llvm::ConstantExpr::getBitCast(GV,
ObjCTypes.ProtocolListnfABIPtrTy);
@@ -5970,7 +6561,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitClassRefFromId(CGBuilderTy &Builder,
ClassGV,
"\01L_OBJC_CLASSLIST_REFERENCES_$_");
Entry->setAlignment(
- CGM.getTargetData().getABITypeAlignment(
+ CGM.getDataLayout().getABITypeAlignment(
ObjCTypes.ClassnfABIPtrTy));
Entry->setSection("__DATA, __objc_classrefs, regular, no_dead_strip");
CGM.AddUsedGlobal(Entry);
@@ -6004,7 +6595,7 @@ CGObjCNonFragileABIMac::EmitSuperClassRef(CGBuilderTy &Builder,
ClassGV,
"\01L_OBJC_CLASSLIST_SUP_REFS_$_");
Entry->setAlignment(
- CGM.getTargetData().getABITypeAlignment(
+ CGM.getDataLayout().getABITypeAlignment(
ObjCTypes.ClassnfABIPtrTy));
Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
CGM.AddUsedGlobal(Entry);
@@ -6030,7 +6621,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CGBuilderTy &Builder,
MetaClassGV,
"\01L_OBJC_CLASSLIST_SUP_REFS_$_");
Entry->setAlignment(
- CGM.getTargetData().getABITypeAlignment(
+ CGM.getDataLayout().getABITypeAlignment(
ObjCTypes.ClassnfABIPtrTy));
Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
@@ -6079,16 +6670,9 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
// If this is a class message the metaclass is passed as the target.
llvm::Value *Target;
- if (IsClassMessage) {
- if (isCategoryImpl) {
- // Message sent to "super' in a class method defined in
- // a category implementation.
- Target = EmitClassRef(CGF.Builder, Class);
- Target = CGF.Builder.CreateStructGEP(Target, 0);
- Target = CGF.Builder.CreateLoad(Target);
- } else
+ if (IsClassMessage)
Target = EmitMetaClassRef(CGF.Builder, Class);
- } else
+ else
Target = EmitSuperClassRef(CGF.Builder, Class);
// FIXME: We shouldn't need to do this cast, rectify the ASTContext and
@@ -6143,7 +6727,7 @@ void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *ivarOffset) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
- unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
: CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
@@ -6164,7 +6748,7 @@ void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
llvm::Value *src, llvm::Value *dst) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
- unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
: CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
@@ -6211,7 +6795,7 @@ void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
- unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
: CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
@@ -6232,7 +6816,7 @@ void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
bool threadlocal) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
- unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
: CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
@@ -6364,7 +6948,7 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
if (CGM.getLangOpts().getVisibilityMode() == HiddenVisibility)
Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
- Entry->setAlignment(CGM.getTargetData().getABITypeAlignment(
+ Entry->setAlignment(CGM.getDataLayout().getABITypeAlignment(
ObjCTypes.EHTypeTy));
if (ForDefinition) {
diff --git a/lib/CodeGen/CGObjCRuntime.cpp b/lib/CodeGen/CGObjCRuntime.cpp
index 9aa6837..6932dd7 100644
--- a/lib/CodeGen/CGObjCRuntime.cpp
+++ b/lib/CodeGen/CGObjCRuntime.cpp
@@ -78,6 +78,13 @@ uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
CGM.getContext().getCharWidth();
}
+unsigned CGObjCRuntime::ComputeBitfieldBitOffset(
+ CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ return LookupFieldBitOffset(CGM, ID, ID->getImplementation(), Ivar);
+}
+
LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
const ObjCInterfaceDecl *OID,
llvm::Value *BaseValue,
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index 219a3e4..3e77875 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -261,6 +261,8 @@ public:
llvm::Value *Size) = 0;
virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
const CodeGen::CGBlockInfo &blockInfo) = 0;
+ virtual llvm::Constant *BuildRCBlockLayout(CodeGen::CodeGenModule &CGM,
+ const CodeGen::CGBlockInfo &blockInfo) = 0;
virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name) = 0;
struct MessageSendInfo {
@@ -275,6 +277,12 @@ public:
MessageSendInfo getMessageSendInfo(const ObjCMethodDecl *method,
QualType resultType,
CallArgList &callArgs);
+
+ // FIXME: This probably shouldn't be here, but the code to compute
+ // it is here.
+ unsigned ComputeBitfieldBitOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar);
};
/// Creates an instance of an Objective-C runtime class.
diff --git a/lib/CodeGen/CGRTTI.cpp b/lib/CodeGen/CGRTTI.cpp
index d1b370a..7c83d39 100644
--- a/lib/CodeGen/CGRTTI.cpp
+++ b/lib/CodeGen/CGRTTI.cpp
@@ -105,7 +105,6 @@ public:
/// BuildTypeInfo - Build the RTTI type info struct for the given type.
///
/// \param Force - true to force the creation of this RTTI value
- /// \param ForEH - true if this is for exception handling
llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false);
};
}
@@ -779,28 +778,24 @@ static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
if (Base->isVirtual()) {
- if (Bases.VirtualBases.count(BaseDecl)) {
+ // Mark the virtual base as seen.
+ if (!Bases.VirtualBases.insert(BaseDecl)) {
// If this virtual base has been seen before, then the class is diamond
// shaped.
Flags |= RTTIBuilder::VMI_DiamondShaped;
} else {
if (Bases.NonVirtualBases.count(BaseDecl))
Flags |= RTTIBuilder::VMI_NonDiamondRepeat;
-
- // Mark the virtual base as seen.
- Bases.VirtualBases.insert(BaseDecl);
}
} else {
- if (Bases.NonVirtualBases.count(BaseDecl)) {
+ // Mark the non-virtual base as seen.
+ if (!Bases.NonVirtualBases.insert(BaseDecl)) {
// If this non-virtual base has been seen before, then the class has non-
// diamond shaped repeated inheritance.
Flags |= RTTIBuilder::VMI_NonDiamondRepeat;
} else {
if (Bases.VirtualBases.count(BaseDecl))
Flags |= RTTIBuilder::VMI_NonDiamondRepeat;
-
- // Mark the non-virtual base as seen.
- Bases.NonVirtualBases.insert(BaseDecl);
}
}
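
The rewrite folds the membership test and the insertion into a single insert call: in the LLVM of this era, SmallPtrSet::insert returns false when the element was already present, which is exactly the "seen before" signal that marks diamond-shaped or repeated inheritance. The same idiom with a standard container, as a sketch:

    #include <cassert>
    #include <set>

    int main() {
      std::set<const void *> seen;
      int base;
      assert(seen.insert(&base).second);   // first visit: newly inserted
      assert(!seen.insert(&base).second);  // second visit: repeat detected
      return 0;
    }
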
@@ -891,7 +886,7 @@ void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
Offset = Layout.getBaseClassOffset(BaseDecl);
};
- OffsetFlags = Offset.getQuantity() << 8;
+ OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
// The low-order byte of __offset_flags contains flags, as given by the
// masks from the enumeration __offset_flags_masks.
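
The uint64_t cast matters because CharUnits quantities are signed 64-bit: shifting the signed value left by 8 is undefined behavior once the result would overflow into the sign bit, while widening to uint64_t first keeps the flag-packing well defined for any offset. A minimal illustration:

    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t offset = 16;                          // base-class offset in bytes
      uint64_t offsetFlags = uint64_t(offset) << 8; // offset in the high bits
      offsetFlags |= 0x1;                           // a low-order flag bit
      assert(offsetFlags == 0x1001);
      // For a pathologically large offset, the unsigned shift stays defined:
      uint64_t big = uint64_t(INT64_C(1) << 60) << 8; // wraps modulo 2^64
      (void)big;
      return 0;
    }
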
@@ -982,7 +977,7 @@ llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
// Return a bogus pointer if RTTI is disabled, unless it's for EH.
// FIXME: should we even be calling this method if RTTI is disabled
// and it's not for EH?
- if (!ForEH && !getContext().getLangOpts().RTTI)
+ if (!ForEH && !getLangOpts().RTTI)
return llvm::Constant::getNullValue(Int8PtrTy);
if (ForEH && Ty->isObjCObjectPointerType() &&
diff --git a/lib/CodeGen/CGRecordLayout.h b/lib/CodeGen/CGRecordLayout.h
index 94c822f..3db5e04 100644
--- a/lib/CodeGen/CGRecordLayout.h
+++ b/lib/CodeGen/CGRecordLayout.h
@@ -166,8 +166,8 @@ public:
class CGRecordLayout {
friend class CodeGenTypes;
- CGRecordLayout(const CGRecordLayout&); // DO NOT IMPLEMENT
- void operator=(const CGRecordLayout&); // DO NOT IMPLEMENT
+ CGRecordLayout(const CGRecordLayout &) LLVM_DELETED_FUNCTION;
+ void operator=(const CGRecordLayout &) LLVM_DELETED_FUNCTION;
private:
/// The LLVM type corresponding to this record layout; used when
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index d642ef8..26ef3ef 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -25,7 +25,7 @@
#include "llvm/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace clang;
using namespace CodeGen;
@@ -206,7 +206,7 @@ void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
Packed = D->hasAttr<PackedAttr>();
- IsMsStruct = D->hasAttr<MsStructAttr>();
+ IsMsStruct = D->isMsStruct(Types.getContext());
if (D->isUnion()) {
LayoutUnion(D);
@@ -239,7 +239,7 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
CharUnits TypeSizeInBytes =
- CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
+ CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);
bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
@@ -259,7 +259,7 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
// In big-endian machines the first fields are in higher bit positions,
// so reverse the offset. The byte offsets are reversed back later.
- if (Types.getTargetData().isBigEndian()) {
+ if (Types.getDataLayout().isBigEndian()) {
FieldOffset = ((ContainingTypeSizeInBits)-FieldOffset-FieldSize);
}
@@ -334,7 +334,7 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
// on big-endian machines we reversed the bit offset because the first fields
// are in higher bits. But this also reverses the bytes, so fix that here by
// reversing the byte offset on big-endian machines.
- if (Types.getTargetData().isBigEndian()) {
+ if (Types.getDataLayout().isBigEndian()) {
AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(
ContainingTypeSizeInBits - AccessStart - AccessWidth);
} else {
@@ -553,9 +553,9 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
hasOnlyZeroSizedBitFields = false;
CharUnits fieldAlign = CharUnits::fromQuantity(
- Types.getTargetData().getABITypeAlignment(fieldType));
+ Types.getDataLayout().getABITypeAlignment(fieldType));
CharUnits fieldSize = CharUnits::fromQuantity(
- Types.getTargetData().getTypeAllocSize(fieldType));
+ Types.getDataLayout().getTypeAllocSize(fieldType));
if (fieldAlign < unionAlign)
continue;
@@ -884,7 +884,7 @@ void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
llvm::Type *fieldType) {
CharUnits fieldSize =
- CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(fieldType));
+ CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(fieldType));
FieldTypes.push_back(fieldType);
@@ -957,7 +957,7 @@ CharUnits CGRecordLayoutBuilder::getTypeAlignment(llvm::Type *Ty) const {
if (Packed)
return CharUnits::One();
- return CharUnits::fromQuantity(Types.getTargetData().getABITypeAlignment(Ty));
+ return CharUnits::fromQuantity(Types.getDataLayout().getABITypeAlignment(Ty));
}
CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
@@ -1036,7 +1036,7 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);
uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
- assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
+ assert(TypeSizeInBits == getDataLayout().getTypeAllocSizeInBits(Ty) &&
"Type size mismatch!");
if (BaseTy) {
@@ -1049,19 +1049,19 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
getContext().toBits(AlignedNonVirtualTypeSize);
assert(AlignedNonVirtualTypeSizeInBits ==
- getTargetData().getTypeAllocSizeInBits(BaseTy) &&
+ getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
"Type size mismatch!");
}
// Verify that the LLVM and AST field offsets agree.
llvm::StructType *ST =
dyn_cast<llvm::StructType>(RL->getLLVMType());
- const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);
+ const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);
const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
RecordDecl::field_iterator it = D->field_begin();
const FieldDecl *LastFD = 0;
- bool IsMsStruct = D->hasAttr<MsStructAttr>();
+ bool IsMsStruct = D->isMsStruct(getContext());
for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
const FieldDecl *FD = *it;
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index d78908d..3548dba 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -21,7 +21,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/InlineAsm.h"
#include "llvm/Intrinsics.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace clang;
using namespace CodeGen;
@@ -132,8 +132,8 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
- case Stmt::AsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
- case Stmt::MSAsmStmtClass: EmitMSAsmStmt(cast<MSAsmStmt>(*S)); break;
+ case Stmt::GCCAsmStmtClass: // Intentional fall-through.
+ case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
case Stmt::ObjCAtTryStmtClass:
EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
@@ -237,6 +237,10 @@ void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
if (!BI || !BI->isUnconditional())
return;
+ // Can only simplify empty blocks.
+ if (BI != BB->begin())
+ return;
+
BB->replaceAllUsesWith(BI->getSuccessor(0));
BI->eraseFromParent();
BB->eraseFromParent();
@@ -743,6 +747,17 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
// Emit the result value, even if unused, to evaluate the side effects.
const Expr *RV = S.getRetValue();
+ // Treat block literals in a return expression as if they appeared
+ // in their own scope. This permits a small, easily-implemented
+ // exception to our over-conservative rules about not jumping to
+ // statements following block literals with non-trivial cleanups.
+ RunCleanupsScope cleanupScope(*this);
+ if (const ExprWithCleanups *cleanups =
+ dyn_cast_or_null<ExprWithCleanups>(RV)) {
+ enterFullExpression(cleanups);
+ RV = cleanups->getSubExpr();
+ }
+
// FIXME: Clean this up by using an LValue for ReturnTemp,
// EmitStoreThroughLValue, and EmitAnyExpr.
if (S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable() &&
@@ -779,6 +794,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
AggValueSlot::IsNotAliased));
}
+ cleanupScope.ForceCleanup();
EmitBranchThroughCleanup(ReturnBlock);
}
@@ -899,7 +915,8 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
// If the body of the case is just a 'break', and if there was no fallthrough,
// try to not emit an empty block.
- if ((CGM.getCodeGenOpts().OptimizationLevel > 0) && isa<BreakStmt>(S.getSubStmt())) {
+ if ((CGM.getCodeGenOpts().OptimizationLevel > 0) &&
+ isa<BreakStmt>(S.getSubStmt())) {
JumpDest Block = BreakContinueStack.back().BreakBlock;
// Only do this optimization if there are no cleanups that need emitting.
@@ -1263,6 +1280,10 @@ SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
case '=': // Will see this and the following in multi-alt constraints.
case '+':
break;
+ case '#': // Ignore the rest of the constraint alternative.
+ while (Constraint[1] && Constraint[1] != ',')
+ Constraint++;
+ break;
case ',':
Result += "|";
break;
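
The new '#' case discards the remainder of one multi-alternative constraint segment: everything from '#' up to (but not including) the next ',' is skipped, so an alternative such as "#m" contributes nothing to the simplified string. A standalone sketch of the same skip loop (hypothetical helper, not the clang function):

    #include <string>

    // Strip '#'-prefixed alternatives from a GCC constraint string,
    // mirroring the skip loop added to SimplifyConstraint above.
    // e.g. stripHashAlternatives("r,#m") yields "r,".
    std::string stripHashAlternatives(const char *Constraint) {
      std::string Result;
      for (; *Constraint; ++Constraint) {
        if (*Constraint == '#') {
          while (Constraint[1] && Constraint[1] != ',')
            ++Constraint;        // skip to the next alternative
        } else {
          Result += *Constraint;
        }
      }
      return Result;
    }
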
@@ -1323,8 +1344,7 @@ AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
}
llvm::Value*
-CodeGenFunction::EmitAsmInputLValue(const AsmStmt &S,
- const TargetInfo::ConstraintInfo &Info,
+CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
LValue InputValue, QualType InputType,
std::string &ConstraintStr) {
llvm::Value *Arg;
@@ -1333,7 +1353,7 @@ CodeGenFunction::EmitAsmInputLValue(const AsmStmt &S,
Arg = EmitLoadOfLValue(InputValue).getScalarVal();
} else {
llvm::Type *Ty = ConvertType(InputType);
- uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
+ uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
Ty = llvm::IntegerType::get(getLLVMContext(), Size);
Ty = llvm::PointerType::getUnqual(Ty);
@@ -1353,7 +1373,7 @@ CodeGenFunction::EmitAsmInputLValue(const AsmStmt &S,
return Arg;
}
-llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S,
+llvm::Value* CodeGenFunction::EmitAsmInput(
const TargetInfo::ConstraintInfo &Info,
const Expr *InputExpr,
std::string &ConstraintStr) {
@@ -1363,7 +1383,7 @@ llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S,
InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
LValue Dest = EmitLValue(InputExpr);
- return EmitAsmInputLValue(S, Info, Dest, InputExpr->getType(), ConstraintStr);
+ return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr);
}
/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
@@ -1396,23 +1416,8 @@ static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
}
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
- // Analyze the asm string to decompose it into its pieces. We know that Sema
- // has already done this, so it is guaranteed to be successful.
- SmallVector<AsmStmt::AsmStringPiece, 4> Pieces;
- unsigned DiagOffs;
- S.AnalyzeAsmString(Pieces, getContext(), DiagOffs);
-
- // Assemble the pieces into the final asm string.
- std::string AsmString;
- for (unsigned i = 0, e = Pieces.size(); i != e; ++i) {
- if (Pieces[i].isString())
- AsmString += Pieces[i].getString();
- else if (Pieces[i].getModifier() == '\0')
- AsmString += '$' + llvm::utostr(Pieces[i].getOperandNo());
- else
- AsmString += "${" + llvm::utostr(Pieces[i].getOperandNo()) + ':' +
- Pieces[i].getModifier() + '}';
- }
+ // Assemble the final asm string.
+ std::string AsmString = S.generateAsmString(getContext());
// Get all the output and input constraints together.
SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
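
The removed loop now lives behind AsmStmt::generateAsmString, which renders each analyzed operand reference into LLVM's inline-asm syntax. Roughly, and as the deleted code shows: operand N with no modifier becomes "$N", and a modified operand becomes "${N:M}". A sketch of that rendering rule (hypothetical helper, not a clang API):

    #include <string>

    // Render one analyzed asm operand the way the removed loop did:
    // "%0" -> "$0", "%w1" -> "${1:w}"; plain text passes through.
    std::string renderOperand(unsigned N, char Modifier) {
      if (Modifier == '\0')
        return "$" + std::to_string(N);
      return "${" + std::to_string(N) + ":" + Modifier + "}";
    }
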
@@ -1511,7 +1516,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
InOutConstraints += ',';
const Expr *InputExpr = S.getOutputExpr(i);
- llvm::Value *Arg = EmitAsmInputLValue(S, Info, Dest, InputExpr->getType(),
+ llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
InOutConstraints);
if (llvm::Type* AdjTy =
@@ -1549,7 +1554,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
*InputExpr->IgnoreParenNoopCasts(getContext()),
Target, CGM, S);
- llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, Constraints);
+ llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
// If this input argument is tied to a larger output result, extend the
// input to be the same size as the output. The LLVM backend wants to see
@@ -1596,7 +1601,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Clobbers
for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
- StringRef Clobber = S.getClobber(i)->getString();
+ StringRef Clobber = S.getClobber(i);
if (Clobber != "memory" && Clobber != "cc")
Clobber = Target.getNormalizedGCCRegisterName(Clobber);
@@ -1628,15 +1633,22 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(ResultType, ArgTypes, false);
+ bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
+ llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
+ llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
llvm::InlineAsm *IA =
- llvm::InlineAsm::get(FTy, AsmString, Constraints,
- S.isVolatile() || S.getNumOutputs() == 0);
+ llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
+ /* IsAlignStack */ false, AsmDialect);
llvm::CallInst *Result = Builder.CreateCall(IA, Args);
- Result->addAttribute(~0, llvm::Attribute::NoUnwind);
+ Result->addAttribute(llvm::AttrListPtr::FunctionIndex,
+ llvm::Attributes::get(getLLVMContext(),
+ llvm::Attributes::NoUnwind));
// Slap the source location of the inline asm into a !srcloc metadata on the
- // call.
- Result->setMetadata("srcloc", getAsmSrcLocInfo(S.getAsmString(), *this));
+ // call. FIXME: Handle metadata for MS-style inline asms.
+ if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
+ Result->setMetadata("srcloc", getAsmSrcLocInfo(gccAsmStmt->getAsmString(),
+ *this));
// Extract all of the register value results from the asm.
std::vector<llvm::Value*> RegResults;
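
With MS asm folded into EmitAsmStmt, the dialect-specific parts shrink to the AsmDialect flag on llvm::InlineAsm::get and the !srcloc metadata, which for now is attached only for GCC-style statements. A usage sketch against the LLVM 3.2-era signature used in the hunk:

    // Sketch: FTy, AsmString, Constraints and HasSideEffect as in the
    // hunk above; the dialect selects AT&T vs. Intel operand syntax.
    llvm::InlineAsm *IA =
        llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
                             /*IsAlignStack=*/false,
                             llvm::InlineAsm::AD_Intel); // AD_ATT for GCC asm
    llvm::CallInst *Call = Builder.CreateCall(IA, Args);
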
@@ -1662,12 +1674,12 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
if (TruncTy->isFloatingPointTy())
Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
- uint64_t ResSize = CGM.getTargetData().getTypeSizeInBits(TruncTy);
+ uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
Tmp = Builder.CreateTrunc(Tmp,
llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
} else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
- uint64_t TmpSize =CGM.getTargetData().getTypeSizeInBits(Tmp->getType());
+ uint64_t TmpSize = CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
Tmp = Builder.CreatePtrToInt(Tmp,
llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
Tmp = Builder.CreateTrunc(Tmp, TruncTy);
@@ -1681,47 +1693,3 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]);
}
}
-
-void CodeGenFunction::EmitMSAsmStmt(const MSAsmStmt &S) {
- // MS-style inline assembly is not fully supported, so sema emits a warning.
- if (!CGM.getCodeGenOpts().EmitMicrosoftInlineAsm)
- return;
-
- assert (S.isSimple() && "CodeGen can only handle simple MSAsmStmts.");
-
- std::vector<llvm::Value*> Args;
- std::vector<llvm::Type *> ArgTypes;
- std::string Constraints;
-
- // Clobbers
- for (unsigned i = 0, e = S.getNumClobbers(); i != e; ++i) {
- StringRef Clobber = S.getClobber(i);
-
- if (Clobber != "memory" && Clobber != "cc")
- Clobber = Target.getNormalizedGCCRegisterName(Clobber);
-
- if (i != 0)
- Constraints += ',';
-
- Constraints += "~{";
- Constraints += Clobber;
- Constraints += '}';
- }
-
- // Add machine specific clobbers
- std::string MachineClobbers = Target.getClobbers();
- if (!MachineClobbers.empty()) {
- if (!Constraints.empty())
- Constraints += ',';
- Constraints += MachineClobbers;
- }
-
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(VoidTy, ArgTypes, false);
-
- llvm::InlineAsm *IA =
- llvm::InlineAsm::get(FTy, *S.getAsmString(), Constraints, true);
- llvm::CallInst *Result = Builder.CreateCall(IA, Args);
- Result->addAttribute(~0, llvm::Attribute::NoUnwind);
- Result->addAttribute(~0, llvm::Attribute::IANSDialect);
-}
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index cdaa26a..5b37fe4 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -79,15 +79,16 @@ llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
llvm::Value *Ptr,
int64_t NonVirtualAdjustment,
- int64_t VirtualAdjustment) {
+ int64_t VirtualAdjustment,
+ bool IsReturnAdjustment) {
if (!NonVirtualAdjustment && !VirtualAdjustment)
return Ptr;
llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
llvm::Value *V = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy);
- if (NonVirtualAdjustment) {
- // Do the non-virtual adjustment.
+ if (NonVirtualAdjustment && !IsReturnAdjustment) {
+ // Perform the non-virtual adjustment for a base-to-derived cast.
V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment);
}
@@ -95,7 +96,7 @@ static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
- // Do the virtual adjustment.
+ // Perform the virtual adjustment.
llvm::Value *VTablePtrPtr =
CGF.Builder.CreateBitCast(V, Int8PtrTy->getPointerTo());
@@ -113,6 +114,11 @@ static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
V = CGF.Builder.CreateInBoundsGEP(V, Offset);
}
+ if (NonVirtualAdjustment && IsReturnAdjustment) {
+ // Perform the non-virtual adjustment for a derived-to-base cast.
+ V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment);
+ }
+
// Cast back to the original type.
return CGF.Builder.CreateBitCast(V, Ptr->getType());
}
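
The flag encodes an ordering rule from the Itanium C++ ABI: a 'this' adjustment applies the non-virtual delta before the virtual lookup, while a return-value adjustment applies it after. A plain-pointer sketch of the two orders the hunk implements (readVBaseOffset is a hypothetical stand-in for the vtable load):

    #include <cstdint>

    int64_t readVBaseOffset(char *Ptr, int64_t OffsetOffset); // hypothetical

    char *adjustPointer(char *Ptr, int64_t NonVirtual,
                        int64_t VBaseOffsetOffset, bool IsReturnAdjustment) {
      if (NonVirtual && !IsReturnAdjustment)
        Ptr += NonVirtual;           // 'this' adjustment: non-virtual first
      if (VBaseOffsetOffset)
        Ptr += readVBaseOffset(Ptr, VBaseOffsetOffset);
      if (NonVirtual && IsReturnAdjustment)
        Ptr += NonVirtual;           // return adjustment: non-virtual last
      return Ptr;
    }
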
@@ -150,8 +156,7 @@ static void setThunkVisibility(CodeGenModule &CGM, const CXXMethodDecl *MD,
case TSK_ExplicitSpecialization:
case TSK_ImplicitInstantiation:
- if (!CGM.getCodeGenOpts().HiddenWeakTemplateVTables)
- return;
+ return;
break;
}
@@ -199,7 +204,8 @@ static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
ReturnValue = PerformTypeAdjustment(CGF, ReturnValue,
Thunk.Return.NonVirtual,
- Thunk.Return.VBaseOffsetOffset);
+ Thunk.Return.VBaseOffsetOffset,
+ /*IsReturnAdjustment*/true);
if (NullCheckValue) {
CGF.Builder.CreateBr(AdjustEnd);
@@ -248,7 +254,9 @@ void CodeGenFunction::GenerateVarArgsThunk(
llvm::Function *BaseFn = cast<llvm::Function>(Callee);
// Clone to thunk.
- llvm::Function *NewFn = llvm::CloneFunction(BaseFn);
+ llvm::ValueToValueMapTy VMap;
+ llvm::Function *NewFn = llvm::CloneFunction(BaseFn, VMap,
+ /*ModuleLevelChanges=*/false);
CGM.getModule().getFunctionList().push_back(NewFn);
Fn->replaceAllUsesWith(NewFn);
NewFn->takeName(Fn);
@@ -281,7 +289,8 @@ void CodeGenFunction::GenerateVarArgsThunk(
llvm::Value *AdjustedThisPtr =
PerformTypeAdjustment(*this, ThisPtr,
Thunk.This.NonVirtual,
- Thunk.This.VCallOffsetOffset);
+ Thunk.This.VCallOffsetOffset,
+ /*IsReturnAdjustment*/false);
ThisStore->setOperand(0, AdjustedThisPtr);
if (!Thunk.Return.isEmpty()) {
@@ -324,7 +333,10 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
FunctionArgs.push_back(Param);
}
-
+
+ // Initialize debug info if needed.
+ maybeInitializeDebugInfo();
+
StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs,
SourceLocation());
@@ -335,7 +347,8 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
llvm::Value *AdjustedThisPtr =
PerformTypeAdjustment(*this, LoadCXXThis(),
Thunk.This.NonVirtual,
- Thunk.This.VCallOffsetOffset);
+ Thunk.This.VCallOffsetOffset,
+ /*IsReturnAdjustment*/false);
CallArgList CallArgs;
@@ -455,6 +468,8 @@ void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
return;
}
+ CGM.SetLLVMFunctionAttributesForDefinition(GD.getDecl(), ThunkFn);
+
if (ThunkFn->isVarArg()) {
// Varargs thunks are special; we can't just generate a call because
// we can't copy the varargs. Our implementation is rather
@@ -524,7 +539,7 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
unsigned NextVTableThunkIndex = 0;
- llvm::Constant* PureVirtualFn = 0;
+ llvm::Constant *PureVirtualFn = 0, *DeletedVirtualFn = 0;
for (unsigned I = 0; I != NumComponents; ++I) {
VTableComponent Component = Components[I];
@@ -573,14 +588,25 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
// We have a pure virtual member function.
if (!PureVirtualFn) {
- llvm::FunctionType *Ty =
- llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
- StringRef PureCallName = CGM.getCXXABI().GetPureVirtualCallName();
- PureVirtualFn = CGM.CreateRuntimeFunction(Ty, PureCallName);
- PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
+ llvm::FunctionType *Ty =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ StringRef PureCallName = CGM.getCXXABI().GetPureVirtualCallName();
+ PureVirtualFn = CGM.CreateRuntimeFunction(Ty, PureCallName);
+ PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
CGM.Int8PtrTy);
}
Init = PureVirtualFn;
+ } else if (cast<CXXMethodDecl>(GD.getDecl())->isDeleted()) {
+ if (!DeletedVirtualFn) {
+ llvm::FunctionType *Ty =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ StringRef DeletedCallName =
+ CGM.getCXXABI().GetDeletedVirtualCallName();
+ DeletedVirtualFn = CGM.CreateRuntimeFunction(Ty, DeletedCallName);
+ DeletedVirtualFn = llvm::ConstantExpr::getBitCast(DeletedVirtualFn,
+ CGM.Int8PtrTy);
+ }
+ Init = DeletedVirtualFn;
} else {
// Check if we should use a thunk.
if (NextVTableThunkIndex < NumVTableThunks &&
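
The deleted-virtual path mirrors the pure-virtual one: the slot is filled with a runtime handler (in the Itanium C++ ABI this is __cxa_deleted_virtual, alongside __cxa_pure_virtual) rather than real code. The C++11 source shape that produces such a slot:

    // A deleted virtual function still occupies a vtable slot; with
    // the change above its entry points at the ABI's deleted-virtual
    // handler instead of a thunk or method body.
    struct S {
      virtual void f() = delete;   // slot -> deleted-virtual handler
      virtual ~S() {}
    };
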
diff --git a/lib/CodeGen/CodeGenAction.cpp b/lib/CodeGen/CodeGenAction.cpp
index dd32167..9d6d183 100644
--- a/lib/CodeGen/CodeGenAction.cpp
+++ b/lib/CodeGen/CodeGenAction.cpp
@@ -65,9 +65,11 @@ namespace clang {
TargetOpts(targetopts),
LangOpts(langopts),
AsmOutStream(OS),
+ Context(),
LLVMIRGeneration("LLVM IR Generation Time"),
Gen(CreateLLVMCodeGen(Diags, infile, compopts, C)),
- LinkModule(LinkModule) {
+ LinkModule(LinkModule)
+ {
llvm::TimePassesIsEnabled = TimePasses;
}
@@ -379,7 +381,7 @@ void CodeGenAction::ExecuteAction() {
// FIXME: This is stupid, IRReader shouldn't take ownership.
llvm::MemoryBuffer *MainFileCopy =
llvm::MemoryBuffer::getMemBufferCopy(MainFile->getBuffer(),
- getCurrentFile().c_str());
+ getCurrentFile());
llvm::SMDiagnostic Err;
TheModule.reset(ParseIR(MainFileCopy, Err, *VMContext));
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index 1d02861..18f1623 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -24,7 +24,7 @@
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
#include "llvm/MDBuilder.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace clang;
using namespace CodeGen;
@@ -32,6 +32,10 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
: CodeGenTypeCache(cgm), CGM(cgm),
Target(CGM.getContext().getTargetInfo()),
Builder(cgm.getModule().getContext()),
+ SanitizePerformTypeCheck(CGM.getLangOpts().SanitizeNull |
+ CGM.getLangOpts().SanitizeAlignment |
+ CGM.getLangOpts().SanitizeObjectSize |
+ CGM.getLangOpts().SanitizeVptr),
AutoreleaseResult(false), BlockInfo(0), BlockPointer(0),
LambdaThisCaptureField(0), NormalCleanupDest(0), NextCleanupDestIndex(1),
FirstBlockInfo(0), EHResumeBlock(0), ExceptionSlot(0), EHSelectorSlot(0),
@@ -40,8 +44,6 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
CXXABIThisDecl(0), CXXABIThisValue(0), CXXThisValue(0), CXXVTTDecl(0),
CXXVTTValue(0), OutermostConditional(0), TerminateLandingPad(0),
TerminateHandler(0), TrapBB(0) {
-
- CatchUndefined = getContext().getLangOpts().CatchUndefined;
if (!suppressNewContext)
CGM.getCXXABI().getMangleContext().startNewFunction();
}
@@ -348,11 +350,11 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
RE = FD->redecls_end(); RI != RE; ++RI)
if (RI->isInlineSpecified()) {
- Fn->addFnAttr(llvm::Attribute::InlineHint);
+ Fn->addFnAttr(llvm::Attributes::InlineHint);
break;
}
- if (getContext().getLangOpts().OpenCL) {
+ if (getLangOpts().OpenCL) {
// Add metadata for a kernel function.
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
EmitOpenCLKernelMetadata(FD, Fn);
@@ -485,7 +487,7 @@ static void TryMarkNoThrow(llvm::Function *F) {
} else if (isa<llvm::ResumeInst>(&*BI)) {
return;
}
- F->setDoesNotThrow(true);
+ F->setDoesNotThrow();
}
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
@@ -493,8 +495,8 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
// Check if we should generate debug info for this function.
- if (CGM.getModuleDebugInfo() && !FD->hasAttr<NoDebugAttr>())
- DebugInfo = CGM.getModuleDebugInfo();
+ if (!FD->hasAttr<NoDebugAttr>())
+ maybeInitializeDebugInfo();
FunctionArgList Args;
QualType ResTy = FD->getResultType();
@@ -517,7 +519,7 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
EmitDestructorBody(Args);
else if (isa<CXXConstructorDecl>(FD))
EmitConstructorBody(Args);
- else if (getContext().getLangOpts().CUDA &&
+ else if (getLangOpts().CUDA &&
!CGM.getCodeGenOpts().CUDAIsDevice &&
FD->hasAttr<CUDAGlobalAttr>())
CGM.getCUDARuntime().EmitDeviceStubBody(*this, Args);
@@ -535,6 +537,24 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
else
EmitFunctionBody(Args);
+ // C++11 [stmt.return]p2:
+ // Flowing off the end of a function [...] results in undefined behavior in
+ // a value-returning function.
+ // C11 6.9.1p12:
+ // If the '}' that terminates a function is reached, and the value of the
+ // function call is used by the caller, the behavior is undefined.
+ if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() &&
+ !FD->getResultType()->isVoidType() && Builder.GetInsertBlock()) {
+ if (getLangOpts().SanitizeReturn)
+ EmitCheck(Builder.getFalse(), "missing_return",
+ EmitCheckSourceLocation(FD->getLocation()),
+ llvm::ArrayRef<llvm::Value*>());
+ else if (CGM.getCodeGenOpts().OptimizationLevel == 0)
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::trap));
+ Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+ }
+
// Emit the standard function epilogue.
FinishFunction(BodyRange.getEnd());
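
Concretely: when control can flow off the end of a value-returning function, the new epilogue either branches to the missing_return sanitizer handler or, at -O0 without the sanitizer, executes a trap; either way the fall-through edge now ends in unreachable rather than returning garbage. A function that exercises the path:

    // Flowing off the end here is UB (C++11 [stmt.return]p2). With
    // the change above, this path now ends in llvm.trap/unreachable
    // at -O0, or calls the missing_return handler under the sanitizer.
    int pick(bool b) {
      if (b)
        return 1;
      // no return on this path
    }
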
@@ -806,7 +826,7 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
// Ignore empty classes in C++.
- if (getContext().getLangOpts().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
return;
@@ -983,8 +1003,7 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
arrayType = getContext().getAsArrayType(eltType);
}
- unsigned AddressSpace =
- cast<llvm::PointerType>(addr->getType())->getAddressSpace();
+ unsigned AddressSpace = addr->getType()->getPointerAddressSpace();
llvm::Type *BaseType = ConvertType(eltType)->getPointerTo(AddressSpace);
addr = Builder.CreateBitCast(addr, BaseType, "array.begin");
} else {
@@ -1027,6 +1046,7 @@ CodeGenFunction::getVLASize(const VariableArrayType *type) {
numElements = vlaSize;
} else {
// It's undefined behavior if this wraps around, so mark it that way.
+ // FIXME: Teach -fcatch-undefined-behavior to trap this.
numElements = Builder.CreateNUWMul(numElements, vlaSize);
}
} while ((type = getContext().getAsVariableArrayType(elementType)));
@@ -1104,10 +1124,26 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
// e.g. with a typedef and a pointer to it.
llvm::Value *&entry = VLASizeMap[size];
if (!entry) {
+ llvm::Value *Size = EmitScalarExpr(size);
+
+ // C11 6.7.6.2p5:
+ // If the size is an expression that is not an integer constant
+ // expression [...] each time it is evaluated it shall have a value
+ // greater than zero.
+ if (getLangOpts().SanitizeVLABound &&
+ size->getType()->isSignedIntegerType()) {
+ llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
+ llvm::Constant *StaticArgs[] = {
+ EmitCheckSourceLocation(size->getLocStart()),
+ EmitCheckTypeDescriptor(size->getType())
+ };
+ EmitCheck(Builder.CreateICmpSGT(Size, Zero),
+ "vla_bound_not_positive", StaticArgs, Size);
+ }
+
// Always zexting here would be wrong if it weren't
// undefined behavior to have a negative bound.
- entry = Builder.CreateIntCast(EmitScalarExpr(size), SizeTy,
- /*signed*/ false);
+ entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
}
}
type = vat->getElementType();
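
The ICmpSGT(Size, 0) comparison rejects zero as well as negative bounds; the check is emitted only for signed bound expressions, since an unsigned bound cannot be negative. An example that trips the handler (VLAs are a Clang extension in C++, standard C99):

    // With the vla_bound_not_positive check enabled, calling
    // makeVLA(0) or makeVLA(-1) aborts in the runtime handler
    // instead of silently allocating a bogus array.
    void makeVLA(int n) {
      int buf[n];      // bound checked before the alloca
      buf[0] = 42;
    }
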
@@ -1156,7 +1192,7 @@ void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
llvm::Constant *Init) {
assert (Init && "Invalid DeclRefExpr initializer!");
if (CGDebugInfo *Dbg = getDebugInfo())
- if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo)
+ if (CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo)
Dbg->EmitGlobalVariable(E->getDecl(), Init);
}
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index ed3e43b..f2ab226 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -222,8 +222,7 @@ public:
/// immediately-enclosing context of the cleanup scope. For
/// EH cleanups, this is run in a terminate context.
///
- // \param IsForEHCleanup true if this is for an EH cleanup, false
- /// if for a normal cleanup.
+ /// \param flags The kind of cleanup being run.
virtual void Emit(CodeGenFunction &CGF, Flags flags) = 0;
};
@@ -533,8 +532,8 @@ public:
/// CodeGenFunction - This class organizes the per-function state that is used
/// while generating LLVM code.
class CodeGenFunction : public CodeGenTypeCache {
- CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT
- void operator=(const CodeGenFunction&); // DO NOT IMPLEMENT
+ CodeGenFunction(const CodeGenFunction &) LLVM_DELETED_FUNCTION;
+ void operator=(const CodeGenFunction &) LLVM_DELETED_FUNCTION;
friend class CGCXXABI;
public:
@@ -595,8 +594,9 @@ public:
/// potentially higher performance penalties.
unsigned char BoundsChecking;
- /// CatchUndefined - Emit run-time checks to catch undefined behaviors.
- bool CatchUndefined;
+ /// \brief Whether any type-checking sanitizers are enabled. If \c false,
+ /// calls to EmitTypeCheck can be skipped.
+ bool SanitizePerformTypeCheck;
/// In ARC, whether we should autorelease the return value.
bool AutoreleaseResult;
@@ -795,8 +795,8 @@ public:
bool OldDidCallStackSave;
bool PerformCleanup;
- RunCleanupsScope(const RunCleanupsScope &); // DO NOT IMPLEMENT
- RunCleanupsScope &operator=(const RunCleanupsScope &); // DO NOT IMPLEMENT
+ RunCleanupsScope(const RunCleanupsScope &) LLVM_DELETED_FUNCTION;
+ void operator=(const RunCleanupsScope &) LLVM_DELETED_FUNCTION;
protected:
CodeGenFunction& CGF;
@@ -839,8 +839,8 @@ public:
SourceRange Range;
bool PopDebugStack;
- LexicalScope(const LexicalScope &); // DO NOT IMPLEMENT THESE
- LexicalScope &operator=(const LexicalScope &);
+ LexicalScope(const LexicalScope &) LLVM_DELETED_FUNCTION;
+ void operator=(const LexicalScope &) LLVM_DELETED_FUNCTION;
public:
/// \brief Enter a new cleanup scope.
@@ -908,7 +908,7 @@ public:
/// themselves).
void popCatchScope();
- llvm::BasicBlock *getEHResumeBlock();
+ llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
/// An object to manage conditionally-evaluated expressions.
@@ -1213,6 +1213,14 @@ public:
CodeGenTypes &getTypes() const { return CGM.getTypes(); }
ASTContext &getContext() const { return CGM.getContext(); }
+ /// Initialize DebugInfo from CGM, if available; returns true if it was.
+ bool maybeInitializeDebugInfo() {
+ if (CGM.getModuleDebugInfo()) {
+ DebugInfo = CGM.getModuleDebugInfo();
+ return true;
+ }
+ return false;
+ }
CGDebugInfo *getDebugInfo() {
if (DisableDebugInfo)
return NULL;
@@ -1504,7 +1512,7 @@ public:
static bool hasAggregateLLVMType(QualType T);
/// createBasicBlock - Create an LLVM basic block.
- llvm::BasicBlock *createBasicBlock(StringRef name = "",
+ llvm::BasicBlock *createBasicBlock(const Twine &name = "",
llvm::Function *parent = 0,
llvm::BasicBlock *before = 0) {
#ifdef NDEBUG
@@ -1631,7 +1639,7 @@ public:
/// aggregate expression, the aggloc/agglocvolatile arguments indicate where
/// the result should be returned.
///
- /// \param IgnoreResult - True if the resulting value isn't used.
+ /// \param ignoreResult True if the resulting value isn't used.
RValue EmitAnyExpr(const Expr *E,
AggValueSlot aggSlot = AggValueSlot::ignored(),
bool ignoreResult = false);
@@ -1654,13 +1662,26 @@ public:
void EmitExprAsInit(const Expr *init, const ValueDecl *D,
LValue lvalue, bool capturedByInit);
+ /// EmitAggregateAssign - Emit an aggregate assignment.
+ ///
+ /// The difference from EmitAggregateCopy is that tail padding is not copied.
+ /// This is required for correctness when assigning non-POD structures in C++.
+ void EmitAggregateAssign(llvm::Value *DestPtr, llvm::Value *SrcPtr,
+ QualType EltTy, bool isVolatile=false,
+ CharUnits Alignment = CharUnits::Zero()) {
+ EmitAggregateCopy(DestPtr, SrcPtr, EltTy, isVolatile, Alignment, true);
+ }
+
/// EmitAggregateCopy - Emit an aggregate copy.
///
/// \param isVolatile - True iff either the source or the destination is
/// volatile.
+ /// \param isAssignment - If false, allow padding to be copied. This often
+ /// yields more efficient code.
void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
QualType EltTy, bool isVolatile=false,
- CharUnits Alignment = CharUnits::Zero());
+ CharUnits Alignment = CharUnits::Zero(),
+ bool isAssignment = false);
/// StartBlock - Start new block named N. If insert block is a dummy block
/// then reuse it.
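
The assignment/copy split exists because Itanium-ABI C++ may place a derived class's members in a base subobject's tail padding; a full-size memcpy on assignment would then clobber live derived data. A sketch of the layout that goes wrong (offsets assume a typical Itanium target):

    // A is non-POD (user-declared ctor), so its tail padding is
    // reusable: data size 5, sizeof(A) == 8, and B::c sits at
    // offset 5 inside that padding. A's copy-assignment is still
    // trivial and lowers to a memcpy -- which must copy only the
    // 5 data bytes (the isAssignment path), never all 8 bytes.
    struct A {
      A();           // declaration alone makes A non-POD
      int i;         // offset 0
      char j;        // offset 4
    };
    struct B : A {
      char c;        // offset 5: inside A's tail padding
    };

    void assignBase(B &dst, const A &src) {
      static_cast<A &>(dst) = src;   // must not touch dst.c
    }
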
@@ -1829,12 +1850,37 @@ public:
llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
+ llvm::Value* EmitCXXUuidofExpr(const CXXUuidofExpr *E);
void MaybeEmitStdInitializerListCleanup(llvm::Value *loc, const Expr *init);
void EmitStdInitializerListCleanup(llvm::Value *loc,
const InitListExpr *init);
- void EmitCheck(llvm::Value *, unsigned Size);
+ /// \brief Situations in which we might emit a check for the suitability of a
+ /// pointer or glvalue.
+ enum TypeCheckKind {
+ /// Checking the operand of a load. Must be suitably sized and aligned.
+ TCK_Load,
+ /// Checking the destination of a store. Must be suitably sized and aligned.
+ TCK_Store,
+ /// Checking the bound value in a reference binding. Must be suitably sized
+ /// and aligned, but is not required to refer to an object (until the
+ /// reference is used), per core issue 453.
+ TCK_ReferenceBinding,
+ /// Checking the object expression in a non-static data member access. Must
+ /// be an object within its lifetime.
+ TCK_MemberAccess,
+ /// Checking the 'this' pointer for a call to a non-static member function.
+ /// Must be an object within its lifetime.
+ TCK_MemberCall,
+ /// Checking the 'this' pointer for a constructor call.
+ TCK_ConstructorCall
+ };
+
+ /// \brief Emit a check that \p V is the address of storage of the
+ /// appropriate size and alignment for an object of type \p Type.
+ void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
+ QualType Type, CharUnits Alignment = CharUnits::Zero());
llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre);
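
EmitTypeCheck replaces the old size-only EmitCheck(Value, Size) entry point: callers now state why a pointer is being checked, and the kind selects which of the null/alignment/object-size/vptr checks apply (a reference binding, per the comment, need not refer to a live object yet). One bug class this catches at run time, assuming the relevant sanitizers are enabled:

    struct T { virtual void g(); };

    void h(T *p) {
      // With p == nullptr, the TCK_MemberCall check on 'this'
      // diverts to the sanitizer handler instead of crashing
      // (or worse, appearing to work) inside T::g.
      p->g();
    }
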
@@ -1981,7 +2027,6 @@ public:
void EmitCaseStmt(const CaseStmt &S);
void EmitCaseStmtRange(const CaseStmt &S);
void EmitAsmStmt(const AsmStmt &S);
- void EmitMSAsmStmt(const MSAsmStmt &S);
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
@@ -2033,11 +2078,10 @@ public:
///
LValue EmitLValue(const Expr *E);
- /// EmitCheckedLValue - Same as EmitLValue but additionally we generate
- /// checking code to guard against undefined behavior. This is only
- /// suitable when we know that the address will be used to access the
- /// object.
- LValue EmitCheckedLValue(const Expr *E);
+ /// \brief Same as EmitLValue but additionally we generate checking code to
+ /// guard against undefined behavior. This is only suitable when we know
+ /// that the address will be used to access the object.
+ LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);
/// EmitToMemory - Change a scalar value from its value
/// representation to its in-memory representation.
@@ -2178,6 +2222,7 @@ public:
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
LValue EmitLambdaLValue(const LambdaExpr *E);
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
+ LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E);
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
@@ -2230,6 +2275,7 @@ public:
const CXXRecordDecl *RD);
RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
+ SourceLocation CallLoc,
llvm::Value *Callee,
ReturnValueSlot ReturnValue,
llvm::Value *This,
@@ -2310,6 +2356,7 @@ public:
llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
+ void EmitARCDestroyStrong(llvm::Value *addr, bool precise);
void EmitARCRelease(llvm::Value *value, bool precise);
llvm::Value *EmitARCAutorelease(llvm::Value *value);
llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
@@ -2516,9 +2563,29 @@ public:
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
llvm::BasicBlock *FalseBlock);
- /// getTrapBB - Create a basic block that will call the trap intrinsic. We'll
- /// generate a branch around the created basic block as necessary.
- llvm::BasicBlock *getTrapBB();
+ /// \brief Emit a description of a type in a format suitable for passing to
+ /// a runtime sanitizer handler.
+ llvm::Constant *EmitCheckTypeDescriptor(QualType T);
+
+ /// \brief Convert a value into a format suitable for passing to a runtime
+ /// sanitizer handler.
+ llvm::Value *EmitCheckValue(llvm::Value *V);
+
+ /// \brief Emit a description of a source location in a format suitable for
+ /// passing to a runtime sanitizer handler.
+ llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);
+
+ /// \brief Create a basic block that will call a handler function in a
+ /// sanitizer runtime with the provided arguments, and create a conditional
+ /// branch to it.
+ void EmitCheck(llvm::Value *Checked, StringRef CheckName,
+ llvm::ArrayRef<llvm::Constant *> StaticArgs,
+ llvm::ArrayRef<llvm::Value *> DynamicArgs,
+ bool Recoverable = false);
+
+ /// \brief Create a basic block that will call the trap intrinsic, and emit a
+ /// conditional branch to it, for the -ftrapv checks.
+ void EmitTrapvCheck(llvm::Value *Checked);
/// EmitCallArg - Emit a single call argument.
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
@@ -2553,12 +2620,10 @@ private:
SmallVector<llvm::Value*, 16> &Args,
llvm::FunctionType *IRFuncTy);
- llvm::Value* EmitAsmInput(const AsmStmt &S,
- const TargetInfo::ConstraintInfo &Info,
+ llvm::Value* EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
const Expr *InputExpr, std::string &ConstraintStr);
- llvm::Value* EmitAsmInputLValue(const AsmStmt &S,
- const TargetInfo::ConstraintInfo &Info,
+ llvm::Value* EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
LValue InputValue, QualType InputType,
std::string &ConstraintStr);
@@ -2624,15 +2689,9 @@ private:
void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
- /// GetPointeeAlignment - Given an expression with a pointer type, find the
- /// alignment of the type referenced by the pointer. Skip over implicit
- /// casts.
- unsigned GetPointeeAlignment(const Expr *Addr);
-
- /// GetPointeeAlignmentValue - Given an expression with a pointer type, find
- /// the alignment of the type referenced by the pointer. Skip over implicit
- /// casts. Return the alignment as an llvm::Value.
- llvm::Value *GetPointeeAlignmentValue(const Expr *Addr);
+ /// EmitPointerWithAlignment - Given an expression with a pointer type, emit
+ /// the value and compute our best estimate of the alignment of the pointee.
+ std::pair<llvm::Value*, unsigned> EmitPointerWithAlignment(const Expr *Addr);
};
/// Helper class with most of the code for saving a value for a
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index 3ae3c52..17972e2 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -42,7 +42,7 @@
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
@@ -62,10 +62,10 @@ static CGCXXABI &createCXXABI(CodeGenModule &CGM) {
CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
- llvm::Module &M, const llvm::TargetData &TD,
+ llvm::Module &M, const llvm::DataLayout &TD,
DiagnosticsEngine &diags)
: Context(C), LangOpts(C.getLangOpts()), CodeGenOpts(CGO), TheModule(M),
- TheTargetData(TD), TheTargetCodeGenInfo(0), Diags(diags),
+ TheDataLayout(TD), TheTargetCodeGenInfo(0), Diags(diags),
ABI(createCXXABI(*this)),
Types(*this),
TBAA(0),
@@ -103,14 +103,14 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
createCUDARuntime();
// Enable TBAA unless it's suppressed. ThreadSanitizer needs TBAA even at O0.
- if (LangOpts.ThreadSanitizer ||
+ if (LangOpts.SanitizeThread ||
(!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0))
TBAA = new CodeGenTBAA(Context, VMContext, CodeGenOpts, getLangOpts(),
ABI.getMangleContext());
// If debug info or coverage generation is enabled, create the CGDebugInfo
// object.
- if (CodeGenOpts.DebugInfo != CodeGenOptions::NoDebugInfo ||
+ if (CodeGenOpts.getDebugInfo() != CodeGenOptions::NoDebugInfo ||
CodeGenOpts.EmitGcovArcs ||
CodeGenOpts.EmitGcovNotes)
DebugInfo = new CGDebugInfo(*this);
@@ -202,6 +202,12 @@ llvm::MDNode *CodeGenModule::getTBAAInfoForVTablePtr() {
return TBAA->getTBAAInfoForVTablePtr();
}
+llvm::MDNode *CodeGenModule::getTBAAStructInfo(QualType QTy) {
+ if (!TBAA)
+ return 0;
+ return TBAA->getTBAAStructInfo(QTy);
+}
+
void CodeGenModule::DecorateInstruction(llvm::Instruction *Inst,
llvm::MDNode *TBAAInfo) {
Inst->setMetadata(llvm::LLVMContext::MD_tbaa, TBAAInfo);
@@ -287,7 +293,7 @@ void CodeGenModule::setTLSMode(llvm::GlobalVariable *GV,
assert(D.isThreadSpecified() && "setting TLS mode on non-TLS var!");
llvm::GlobalVariable::ThreadLocalMode TLM;
- TLM = GetLLVMTLSModel(CodeGenOpts.DefaultTLSModel);
+ TLM = GetLLVMTLSModel(CodeGenOpts.getDefaultTLSModel());
// Override the TLS model if it is explicitly specified.
if (D.hasAttr<TLSModelAttr>()) {
@@ -347,9 +353,7 @@ void CodeGenModule::setTypeVisibility(llvm::GlobalValue *GV,
// to deal with mixed-visibility symbols.
case TSK_ExplicitSpecialization:
case TSK_ImplicitInstantiation:
- if (!CodeGenOpts.HiddenWeakTemplateVTables)
- return;
- break;
+ return;
}
// If there's a key function, there may be translation units
@@ -529,7 +533,7 @@ void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
unsigned CallingConv;
AttributeListType AttributeList;
ConstructAttributeList(Info, D, AttributeList, CallingConv);
- F->setAttributes(llvm::AttrListPtr::get(AttributeList));
+ F->setAttributes(llvm::AttrListPtr::get(getLLVMContext(), AttributeList));
F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
}
@@ -559,39 +563,46 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
F->setHasUWTable();
if (!hasUnwindExceptions(LangOpts))
- F->addFnAttr(llvm::Attribute::NoUnwind);
+ F->addFnAttr(llvm::Attributes::NoUnwind);
if (D->hasAttr<NakedAttr>()) {
// Naked implies noinline: we should not be inlining such functions.
- F->addFnAttr(llvm::Attribute::Naked);
- F->addFnAttr(llvm::Attribute::NoInline);
+ F->addFnAttr(llvm::Attributes::Naked);
+ F->addFnAttr(llvm::Attributes::NoInline);
}
if (D->hasAttr<NoInlineAttr>())
- F->addFnAttr(llvm::Attribute::NoInline);
+ F->addFnAttr(llvm::Attributes::NoInline);
// (noinline wins over always_inline, and we can't specify both in IR)
if ((D->hasAttr<AlwaysInlineAttr>() || D->hasAttr<ForceInlineAttr>()) &&
- !F->hasFnAttr(llvm::Attribute::NoInline))
- F->addFnAttr(llvm::Attribute::AlwaysInline);
+ !F->getFnAttributes().hasAttribute(llvm::Attributes::NoInline))
+ F->addFnAttr(llvm::Attributes::AlwaysInline);
// FIXME: Communicate hot and cold attributes to LLVM more directly.
if (D->hasAttr<ColdAttr>())
- F->addFnAttr(llvm::Attribute::OptimizeForSize);
+ F->addFnAttr(llvm::Attributes::OptimizeForSize);
+
+ if (D->hasAttr<MinSizeAttr>())
+ F->addFnAttr(llvm::Attributes::MinSize);
if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
F->setUnnamedAddr(true);
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D))
+ if (MD->isVirtual())
+ F->setUnnamedAddr(true);
+
if (LangOpts.getStackProtector() == LangOptions::SSPOn)
- F->addFnAttr(llvm::Attribute::StackProtect);
+ F->addFnAttr(llvm::Attributes::StackProtect);
else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
- F->addFnAttr(llvm::Attribute::StackProtectReq);
+ F->addFnAttr(llvm::Attributes::StackProtectReq);
- if (LangOpts.AddressSanitizer) {
+ if (LangOpts.SanitizeAddress) {
// When AddressSanitizer is enabled, set AddressSafety attribute
// unless __attribute__((no_address_safety_analysis)) is used.
if (!D->hasAttr<NoAddressSafetyAnalysisAttr>())
- F->addFnAttr(llvm::Attribute::AddressSafety);
+ F->addFnAttr(llvm::Attributes::AddressSafety);
}
unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
@@ -636,7 +647,8 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD,
if (unsigned IID = F->getIntrinsicID()) {
// If this is an intrinsic function, set the function's attributes
// to the intrinsic's attributes.
- F->setAttributes(llvm::Intrinsic::getAttributes((llvm::Intrinsic::ID)IID));
+ F->setAttributes(llvm::Intrinsic::getAttributes(getLLVMContext(),
+ (llvm::Intrinsic::ID)IID));
return;
}
@@ -822,6 +834,49 @@ bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
return !getContext().DeclMustBeEmitted(Global);
}
+llvm::Constant *CodeGenModule::GetAddrOfUuidDescriptor(
+ const CXXUuidofExpr* E) {
+ // Sema has verified that IIDSource has a __declspec(uuid()), and that it's
+ // well-formed.
+ StringRef Uuid;
+ if (E->isTypeOperand())
+ Uuid = CXXUuidofExpr::GetUuidAttrOfType(E->getTypeOperand())->getGuid();
+ else {
+ // Special case: __uuidof(0) means an all-zero GUID.
+ Expr *Op = E->getExprOperand();
+ if (!Op->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull))
+ Uuid = CXXUuidofExpr::GetUuidAttrOfType(Op->getType())->getGuid();
+ else
+ Uuid = "00000000-0000-0000-0000-000000000000";
+ }
+ std::string Name = "__uuid_" + Uuid.str();
+
+ // Look for an existing global.
+ if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
+ return GV;
+
+ llvm::Constant *Init = EmitUuidofInitializer(Uuid, E->getType());
+ assert(Init && "failed to initialize as constant");
+
+ // GUIDs are assumed to be 16 bytes, spread over 4-2-2-8 bytes. However, the
+ // first field is declared as "long", which for many targets is 8 bytes.
+ // Those architectures are not supported. (With the MS ABI, long is always 4
+ // bytes.)
+ llvm::Type *GuidType = getTypes().ConvertType(E->getType());
+ if (Init->getType() != GuidType) {
+ DiagnosticsEngine &Diags = getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "__uuidof codegen is not supported on this architecture");
+ Diags.Report(E->getExprLoc(), DiagID) << E->getSourceRange();
+ Init = llvm::UndefValue::get(GuidType);
+ }
+
+ llvm::GlobalVariable *GV = new llvm::GlobalVariable(getModule(), GuidType,
+ /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, Init, Name);
+ GV->setUnnamedAddr(true);
+ return GV;
+}
+
llvm::Constant *CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
const AliasAttr *AA = VD->getAttr<AliasAttr>();
assert(AA && "No alias?");
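
Descriptors are keyed purely by the GUID string, so every __uuidof expression naming the same UUID shares one private, unnamed-addr __uuid_* global. The source shape that reaches this code (a sketch assuming -fms-extensions; _GUID normally comes from <guiddef.h>):

    struct _GUID {                 // declared here for self-containment
      unsigned long  Data1;
      unsigned short Data2, Data3;
      unsigned char  Data4[8];
    };

    struct __declspec(uuid("12345678-1234-1234-1234-1234567890ab")) Thing;

    const _GUID &id() {
      return __uuidof(Thing);      // address of the shared __uuid_... global
    }
    // __uuidof(0) yields the all-zero GUID, per the special case above.
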
@@ -830,19 +885,23 @@ llvm::Constant *CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
// See if there is already something with the target's name in the module.
llvm::GlobalValue *Entry = GetGlobalValue(AA->getAliasee());
+ if (Entry) {
+ unsigned AS = getContext().getTargetAddressSpace(VD->getType());
+ return llvm::ConstantExpr::getBitCast(Entry, DeclTy->getPointerTo(AS));
+ }
llvm::Constant *Aliasee;
if (isa<llvm::FunctionType>(DeclTy))
- Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GlobalDecl(),
+ Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy,
+ GlobalDecl(cast<FunctionDecl>(VD)),
/*ForVTable=*/false);
else
Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
llvm::PointerType::getUnqual(DeclTy), 0);
- if (!Entry) {
- llvm::GlobalValue* F = cast<llvm::GlobalValue>(Aliasee);
- F->setLinkage(llvm::Function::ExternalWeakLinkage);
- WeakRefReferences.insert(F);
- }
+
+ llvm::GlobalValue* F = cast<llvm::GlobalValue>(Aliasee);
+ F->setLinkage(llvm::Function::ExternalWeakLinkage);
+ WeakRefReferences.insert(F);
return Aliasee;
}
@@ -1051,12 +1110,10 @@ CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
// Lookup the entry, lazily creating it if necessary.
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry) {
- if (WeakRefReferences.count(Entry)) {
+ if (WeakRefReferences.erase(Entry)) {
const FunctionDecl *FD = cast_or_null<FunctionDecl>(D.getDecl());
if (FD && !FD->hasAttr<WeakAttr>())
Entry->setLinkage(llvm::Function::ExternalLinkage);
-
- WeakRefReferences.erase(Entry);
}
if (Entry->getType()->getElementType() == Ty)
@@ -1085,8 +1142,8 @@ CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
assert(F->getName() == MangledName && "name was uniqued!");
if (D.getDecl())
SetFunctionAttributes(D, F, IsIncompleteFunction);
- if (ExtraAttrs != llvm::Attribute::None)
- F->addFnAttr(ExtraAttrs);
+ if (ExtraAttrs.hasAttributes())
+ F->addAttribute(llvm::AttrListPtr::FunctionIndex, ExtraAttrs);
// This is the first use or definition of a mangled name. If there is a
// deferred decl with this name, remember that we need to emit it at the end
@@ -1197,11 +1254,9 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
// Lookup the entry, lazily creating it if necessary.
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry) {
- if (WeakRefReferences.count(Entry)) {
+ if (WeakRefReferences.erase(Entry)) {
if (D && !D->hasAttr<WeakAttr>())
Entry->setLinkage(llvm::Function::ExternalLinkage);
-
- WeakRefReferences.erase(Entry);
}
if (UnnamedAddr)
@@ -1279,7 +1334,7 @@ CodeGenModule::CreateOrReplaceCXXRuntimeVariable(StringRef Name,
// Because C++ name mangling, the only way we can end up with an already
// existing global with the same name is if it has been declared extern "C".
- assert(GV->isDeclaration() && "Declaration has wrong type!");
+ assert(GV->isDeclaration() && "Declaration has wrong type!");
OldGV = GV;
}
@@ -1424,7 +1479,7 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
return Context.toCharUnitsFromBits(
- TheTargetData.getTypeStoreSizeInBits(Ty));
+ TheDataLayout.getTypeStoreSizeInBits(Ty));
}
llvm::Constant *
@@ -1473,10 +1528,10 @@ CodeGenModule::MaybeEmitGlobalStdInitializerListInitializer(const VarDecl *D,
// Now clone the InitListExpr to initialize the array instead.
// Incredible hack: we want to use the existing InitListExpr here, so we need
// to tell it that it no longer initializes a std::initializer_list.
- Expr *arrayInit = new (ctx) InitListExpr(ctx, init->getLBraceLoc(),
- const_cast<InitListExpr*>(init)->getInits(),
- init->getNumInits(),
- init->getRBraceLoc());
+ ArrayRef<Expr*> Inits(const_cast<InitListExpr*>(init)->getInits(),
+ init->getNumInits());
+ Expr *arrayInit = new (ctx) InitListExpr(ctx, init->getLBraceLoc(), Inits,
+ init->getRBraceLoc());
arrayInit->setType(arrayType);
if (!cleanups.empty())
@@ -1682,9 +1737,21 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
if (NeedsGlobalCtor || NeedsGlobalDtor)
EmitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor);
+ // If we are compiling with ASan, add metadata indicating dynamically
+ // initialized globals.
+ if (LangOpts.SanitizeAddress && NeedsGlobalCtor) {
+ llvm::Module &M = getModule();
+
+ llvm::NamedMDNode *DynamicInitializers =
+ M.getOrInsertNamedMetadata("llvm.asan.dynamically_initialized_globals");
+ llvm::Value *GlobalToAdd[] = { GV };
+ llvm::MDNode *ThisGlobal = llvm::MDNode::get(VMContext, GlobalToAdd);
+ DynamicInitializers->addOperand(ThisGlobal);
+ }
+
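
This metadata tells the AddressSanitizer pass which globals run code at startup, so the runtime can flag init-order bugs (e.g. under ASAN_OPTIONS=check_initialization_order=1). The pattern that gets tagged:

    // NeedsGlobalCtor is true here: the initializer is dynamic, so
    // the global is listed in the ASan metadata, and reads of it from
    // another TU's initializers can be reported before it is set up.
    extern int computeDefault();         // defined in another TU
    int GlobalLimit = computeDefault();  // dynamic initialization
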
// Emit global variable debug information.
if (CGDebugInfo *DI = getModuleDebugInfo())
- if (getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo)
+ if (getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo)
DI->EmitGlobalVariable(GV, D);
}
@@ -1758,8 +1825,10 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
llvm::Attributes RAttrs = AttrList.getRetAttributes();
// Add the return attributes.
- if (RAttrs)
- AttrVec.push_back(llvm::AttributeWithIndex::get(0, RAttrs));
+ if (RAttrs.hasAttributes())
+ AttrVec.push_back(llvm::
+ AttributeWithIndex::get(llvm::AttrListPtr::ReturnIndex,
+ RAttrs));
// If the function was passed too few arguments, don't transform. If extra
// arguments were passed, we silently drop them. If any of the types
@@ -1775,14 +1844,18 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
}
// Add any parameter attributes.
- if (llvm::Attributes PAttrs = AttrList.getParamAttributes(ArgNo + 1))
+ llvm::Attributes PAttrs = AttrList.getParamAttributes(ArgNo + 1);
+ if (PAttrs.hasAttributes())
AttrVec.push_back(llvm::AttributeWithIndex::get(ArgNo + 1, PAttrs));
}
if (DontTransform)
continue;
- if (llvm::Attributes FnAttrs = AttrList.getFnAttributes())
- AttrVec.push_back(llvm::AttributeWithIndex::get(~0, FnAttrs));
+ llvm::Attributes FnAttrs = AttrList.getFnAttributes();
+ if (FnAttrs.hasAttributes())
+ AttrVec.push_back(llvm::
+ AttributeWithIndex::get(llvm::AttrListPtr::FunctionIndex,
+ FnAttrs));
// Okay, we can transform this. Create the new call instruction and copy
// over the required information.
@@ -1791,7 +1864,7 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
ArgList.clear();
if (!NewCall->getType()->isVoidTy())
NewCall->takeName(CI);
- NewCall->setAttributes(llvm::AttrListPtr::get(AttrVec));
+ NewCall->setAttributes(llvm::AttrListPtr::get(OldFn->getContext(), AttrVec));
NewCall->setCallingConv(CI->getCallingConv());
// Finally, remove the old call, replacing any uses with the new one.
@@ -1911,7 +1984,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
// if a deferred decl.
llvm::Constant *Aliasee;
if (isa<llvm::FunctionType>(DeclTy))
- Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GlobalDecl(),
+ Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GD,
/*ForVTable=*/false);
else
Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
@@ -1987,7 +2060,7 @@ GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
IsUTF16 = true;
SmallVector<UTF16, 128> ToBuf(NumBytes + 1); // +1 for ending nulls.
- const UTF8 *FromPtr = (UTF8 *)String.data();
+ const UTF8 *FromPtr = (const UTF8 *)String.data();
UTF16 *ToPtr = &ToBuf[0];
(void)ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
@@ -2019,7 +2092,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
bool isUTF16 = false;
llvm::StringMapEntry<llvm::Constant*> &Entry =
GetConstantCFStringEntry(CFConstantStringMap, Literal,
- getTargetData().isLittleEndian(),
+ getDataLayout().isLittleEndian(),
isUTF16, StringLength);
if (llvm::Constant *C = Entry.getValue())
@@ -2429,7 +2502,7 @@ void CodeGenModule::EmitObjCPropertyImplementations(const
ObjCPropertyDecl *PD = PID->getPropertyDecl();
// Determine which methods need to be implemented, some may have
- // been overridden. Note that ::isSynthesized is not the method
+ // been overridden. Note that ::isPropertyAccessor is not the method
// we want, that just indicates if the decl came from a
// property. What we want to know is if the method is defined in
// this implementation.
@@ -2465,11 +2538,11 @@ void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
ObjCMethodDecl::Create(getContext(), D->getLocation(), D->getLocation(),
cxxSelector, getContext().VoidTy, 0, D,
/*isInstance=*/true, /*isVariadic=*/false,
- /*isSynthesized=*/true, /*isImplicitlyDeclared=*/true,
+ /*isPropertyAccessor=*/true, /*isImplicitlyDeclared=*/true,
/*isDefined=*/false, ObjCMethodDecl::Required);
D->addInstanceMethod(DTORMethod);
CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false);
- D->setHasCXXStructors(true);
+ D->setHasDestructors(true);
}
// If the implementation doesn't have any ivar initializers, we don't need
@@ -2487,13 +2560,13 @@ void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
getContext().getObjCIdType(), 0,
D, /*isInstance=*/true,
/*isVariadic=*/false,
- /*isSynthesized=*/true,
+ /*isPropertyAccessor=*/true,
/*isImplicitlyDeclared=*/true,
/*isDefined=*/false,
ObjCMethodDecl::Required);
D->addInstanceMethod(CTORMethod);
CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, CTORMethod, true);
- D->setHasCXXStructors(true);
+ D->setHasNonZeroConstructors(true);
}
/// EmitNamespace - Emit all declarations in a namespace.
@@ -2512,8 +2585,17 @@ void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
}
for (RecordDecl::decl_iterator I = LSD->decls_begin(), E = LSD->decls_end();
- I != E; ++I)
+ I != E; ++I) {
+ // Metadata for an ObjC class includes references to its implemented
+ // methods, so generate the class's method definitions first.
+ if (ObjCImplDecl *OID = dyn_cast<ObjCImplDecl>(*I)) {
+ for (ObjCContainerDecl::method_iterator M = OID->meth_begin(),
+ MEnd = OID->meth_end();
+ M != MEnd; ++M)
+ EmitTopLevelDecl(*M);
+ }
EmitTopLevelDecl(*I);
+ }
}
/// EmitTopLevelDecl - Emit code for a single top level declaration.
@@ -2737,3 +2819,32 @@ void CodeGenModule::EmitCoverageFile() {
}
}
}
+
+llvm::Constant *CodeGenModule::EmitUuidofInitializer(StringRef Uuid,
+ QualType GuidType) {
+ // Sema has checked that all uuid strings are of the form
+ // "12345678-1234-1234-1234-1234567890ab".
+ assert(Uuid.size() == 36);
+ const char *Uuidstr = Uuid.data();
+ for (int i = 0; i < 36; ++i) {
+ if (i == 8 || i == 13 || i == 18 || i == 23) assert(Uuidstr[i] == '-');
+ else assert(isxdigit(Uuidstr[i]));
+ }
+
+ llvm::APInt Field0(32, StringRef(Uuidstr, 8), 16);
+ llvm::APInt Field1(16, StringRef(Uuidstr + 9, 4), 16);
+ llvm::APInt Field2(16, StringRef(Uuidstr + 14, 4), 16);
+ static const int Field3ValueOffsets[] = { 19, 21, 24, 26, 28, 30, 32, 34 };
+
+ APValue InitStruct(APValue::UninitStruct(), /*NumBases=*/0, /*NumFields=*/4);
+ InitStruct.getStructField(0) = APValue(llvm::APSInt(Field0));
+ InitStruct.getStructField(1) = APValue(llvm::APSInt(Field1));
+ InitStruct.getStructField(2) = APValue(llvm::APSInt(Field2));
+ APValue& Arr = InitStruct.getStructField(3);
+ Arr = APValue(APValue::UninitArray(), 8, 8);
+ for (int t = 0; t < 8; ++t)
+ Arr.getArrayInitializedElt(t) = APValue(llvm::APSInt(
+ llvm::APInt(8, StringRef(Uuidstr + Field3ValueOffsets[t], 2), 16)));
+
+ return EmitConstantValue(InitStruct, GuidType);
+}
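
The field split above maps a canonical "12345678-1234-1234-1234-1234567890ab"
string onto a four-field GUID struct. A minimal standalone sketch of the same
decomposition, assuming the conventional Data1..Data4 layout (the Guid struct
and parseUuid helper below are illustrative, not part of this patch):

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <string>

struct Guid {
  uint32_t Data1;
  uint16_t Data2, Data3;
  uint8_t  Data4[8];
};

static Guid parseUuid(const std::string &S) {
  // Same offsets as Field3ValueOffsets: the last two dash-separated groups
  // supply the eight Data4 bytes.
  static const int Off[8] = { 19, 21, 24, 26, 28, 30, 32, 34 };
  Guid G;
  G.Data1 = (uint32_t)std::strtoul(S.substr(0, 8).c_str(), 0, 16);
  G.Data2 = (uint16_t)std::strtoul(S.substr(9, 4).c_str(), 0, 16);
  G.Data3 = (uint16_t)std::strtoul(S.substr(14, 4).c_str(), 0, 16);
  for (int I = 0; I != 8; ++I)
    G.Data4[I] = (uint8_t)std::strtoul(S.substr(Off[I], 2).c_str(), 0, 16);
  return G;
}

int main() {
  Guid G = parseUuid("12345678-1234-1234-1234-1234567890ab");
  std::printf("%08x-%04x-%04x\n", G.Data1, G.Data2, G.Data3);
  return 0; // prints 12345678-1234-1234
}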
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index d6ff50d..1167c87 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -35,7 +35,7 @@ namespace llvm {
class ConstantInt;
class Function;
class GlobalValue;
- class TargetData;
+ class DataLayout;
class FunctionType;
class LLVMContext;
}
@@ -210,8 +210,8 @@ struct ARCEntrypoints {
/// CodeGenModule - This class organizes the cross-function state that is used
/// while generating LLVM code.
class CodeGenModule : public CodeGenTypeCache {
- CodeGenModule(const CodeGenModule&); // DO NOT IMPLEMENT
- void operator=(const CodeGenModule&); // DO NOT IMPLEMENT
+ CodeGenModule(const CodeGenModule &) LLVM_DELETED_FUNCTION;
+ void operator=(const CodeGenModule &) LLVM_DELETED_FUNCTION;
typedef std::vector<std::pair<llvm::Constant*, int> > CtorList;
@@ -219,7 +219,7 @@ class CodeGenModule : public CodeGenTypeCache {
const LangOptions &LangOpts;
const CodeGenOptions &CodeGenOpts;
llvm::Module &TheModule;
- const llvm::TargetData &TheTargetData;
+ const llvm::DataLayout &TheDataLayout;
mutable const TargetCodeGenInfo *TheTargetCodeGenInfo;
DiagnosticsEngine &Diags;
CGCXXABI &ABI;
@@ -296,11 +296,18 @@ class CodeGenModule : public CodeGenTypeCache {
/// order.
llvm::DenseMap<const Decl*, unsigned> DelayedCXXInitPosition;
+ typedef std::pair<OrderGlobalInits, llvm::Function*> GlobalInitData;
+
+ struct GlobalInitPriorityCmp {
+ bool operator()(const GlobalInitData &LHS,
+ const GlobalInitData &RHS) const {
+ return LHS.first.priority < RHS.first.priority;
+ }
+ };
+
/// Global variables with initializers whose order of initialization
/// is set by the init_priority attribute.
-
- SmallVector<std::pair<OrderGlobalInits, llvm::Function*>, 8>
- PrioritizedCXXGlobalInits;
+ SmallVector<GlobalInitData, 8> PrioritizedCXXGlobalInits;
/// CXXGlobalDtors - Global destructor functions and arguments that need to
/// run on termination.
@@ -357,7 +364,7 @@ class CodeGenModule : public CodeGenTypeCache {
/// @}
public:
CodeGenModule(ASTContext &C, const CodeGenOptions &CodeGenOpts,
- llvm::Module &M, const llvm::TargetData &TD,
+ llvm::Module &M, const llvm::DataLayout &TD,
DiagnosticsEngine &Diags);
~CodeGenModule();
@@ -451,7 +458,7 @@ public:
CodeGenVTables &getVTables() { return VTables; }
VTableContext &getVTableContext() { return VTables.getVTableContext(); }
DiagnosticsEngine &getDiags() const { return Diags; }
- const llvm::TargetData &getTargetData() const { return TheTargetData; }
+ const llvm::DataLayout &getDataLayout() const { return TheDataLayout; }
const TargetInfo &getTarget() const { return Context.getTargetInfo(); }
llvm::LLVMContext &getLLVMContext() { return VMContext; }
const TargetCodeGenInfo &getTargetCodeGenInfo();
@@ -461,6 +468,7 @@ public:
llvm::MDNode *getTBAAInfo(QualType QTy);
llvm::MDNode *getTBAAInfoForVTablePtr();
+ llvm::MDNode *getTBAAStructInfo(QualType QTy);
bool isTypeConstant(QualType QTy, bool ExcludeCtorDtor);
@@ -548,6 +556,9 @@ public:
/// for the given type.
llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH = false);
+ /// GetAddrOfUuidDescriptor - Get the address of a uuid descriptor.
+ llvm::Constant *GetAddrOfUuidDescriptor(const CXXUuidofExpr* E);
+
/// GetAddrOfThunk - Get the address of the thunk for the given global decl.
llvm::Constant *GetAddrOfThunk(GlobalDecl GD, const ThunkInfo &Thunk);
@@ -701,7 +712,7 @@ public:
llvm::Constant *CreateRuntimeFunction(llvm::FunctionType *Ty,
StringRef Name,
llvm::Attributes ExtraAttrs =
- llvm::Attribute::None);
+ llvm::Attributes());
/// CreateRuntimeVariable - Create a new runtime global variable with the
/// specified type and name.
llvm::Constant *CreateRuntimeVariable(llvm::Type *Ty,
@@ -880,7 +891,7 @@ private:
GlobalDecl D,
bool ForVTable,
llvm::Attributes ExtraAttrs =
- llvm::Attribute::None);
+ llvm::Attributes());
llvm::Constant *GetOrCreateLLVMGlobal(StringRef MangledName,
llvm::PointerType *PTy,
const VarDecl *D,
@@ -984,6 +995,9 @@ private:
/// to emit the .gcno and .gcda files in a way that persists in .bc files.
void EmitCoverageFile();
+ /// Emits the initializer for a uuidof string.
+ llvm::Constant *EmitUuidofInitializer(StringRef uuidstr, QualType IIDType);
+
/// MayDeferGeneration - Determine if the given decl can be emitted
/// lazily; this is only relevant for definitions. The given decl
/// must be either a function or var decl.
diff --git a/lib/CodeGen/CodeGenTBAA.cpp b/lib/CodeGen/CodeGenTBAA.cpp
index bab60af..d9004a0 100644
--- a/lib/CodeGen/CodeGenTBAA.cpp
+++ b/lib/CodeGen/CodeGenTBAA.cpp
@@ -17,6 +17,7 @@
#include "CodeGenTBAA.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
#include "clang/AST/Mangle.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/LLVMContext.h"
@@ -167,3 +168,59 @@ CodeGenTBAA::getTBAAInfo(QualType QTy) {
llvm::MDNode *CodeGenTBAA::getTBAAInfoForVTablePtr() {
return MDHelper.createTBAANode("vtable pointer", getRoot());
}
+
+bool
+CodeGenTBAA::CollectFields(uint64_t BaseOffset,
+ QualType QTy,
+ SmallVectorImpl<llvm::MDBuilder::TBAAStructField> &
+ Fields,
+ bool MayAlias) {
+ /* Things not handled yet include: C++ base classes and bitfields. */
+
+ if (const RecordType *TTy = QTy->getAs<RecordType>()) {
+ const RecordDecl *RD = TTy->getDecl()->getDefinition();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ // TODO: Handle C++ base classes.
+ if (const CXXRecordDecl *Decl = dyn_cast<CXXRecordDecl>(RD))
+ if (Decl->bases_begin() != Decl->bases_end())
+ return false;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(),
+ e = RD->field_end(); i != e; ++i, ++idx) {
+ uint64_t Offset = BaseOffset +
+ Layout.getFieldOffset(idx) / Context.getCharWidth();
+ QualType FieldQTy = i->getType();
+ if (!CollectFields(Offset, FieldQTy, Fields,
+ MayAlias || TypeHasMayAlias(FieldQTy)))
+ return false;
+ }
+ return true;
+ }
+
+ /* Otherwise, treat whatever it is as a field. */
+ uint64_t Offset = BaseOffset;
+ uint64_t Size = Context.getTypeSizeInChars(QTy).getQuantity();
+ llvm::MDNode *TBAAInfo = MayAlias ? getChar() : getTBAAInfo(QTy);
+ Fields.push_back(llvm::MDBuilder::TBAAStructField(Offset, Size, TBAAInfo));
+ return true;
+}
+
+llvm::MDNode *
+CodeGenTBAA::getTBAAStructInfo(QualType QTy) {
+ const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
+
+ if (llvm::MDNode *N = StructMetadataCache[Ty])
+ return N;
+
+ SmallVector<llvm::MDBuilder::TBAAStructField, 4> Fields;
+ if (CollectFields(0, QTy, Fields, TypeHasMayAlias(QTy)))
+ return MDHelper.createTBAAStructNode(Fields);
+
+ // For now, handle any other kind of type conservatively.
+ return StructMetadataCache[Ty] = NULL;
+}
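
For a concrete feel for what CollectFields records, the (offset, size) pairs
for a simple struct can be reproduced with plain offsetof/sizeof arithmetic;
the numbers stand in for clang's ASTRecordLayout and depend on the host ABI,
so they are illustrative only:

#include <cstddef>
#include <cstdio>

struct S { char a; int b; double c; };

int main() {
  // One (offset, size) entry per scalar field; padding holes are skipped,
  // which is what lets a !tbaa.struct-annotated memcpy treat the padding
  // bytes as dead.
  std::printf("a: offset %u size %u\n",
              (unsigned)offsetof(S, a), (unsigned)sizeof(char));
  std::printf("b: offset %u size %u\n",
              (unsigned)offsetof(S, b), (unsigned)sizeof(int));
  std::printf("c: offset %u size %u\n",
              (unsigned)offsetof(S, c), (unsigned)sizeof(double));
  return 0;
}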
diff --git a/lib/CodeGen/CodeGenTBAA.h b/lib/CodeGen/CodeGenTBAA.h
index c17a5cf..eedb996 100644
--- a/lib/CodeGen/CodeGenTBAA.h
+++ b/lib/CodeGen/CodeGenTBAA.h
@@ -49,6 +49,10 @@ class CodeGenTBAA {
/// MetadataCache - This maps clang::Types to llvm::MDNodes describing them.
llvm::DenseMap<const Type *, llvm::MDNode *> MetadataCache;
+ /// StructMetadataCache - This maps clang::Types to llvm::MDNodes describing
+ /// them for struct assignments.
+ llvm::DenseMap<const Type *, llvm::MDNode *> StructMetadataCache;
+
llvm::MDNode *Root;
llvm::MDNode *Char;
@@ -60,6 +64,13 @@ class CodeGenTBAA {
/// considered to be equivalent to it.
llvm::MDNode *getChar();
+ /// CollectFields - Collect information about the fields of a type for
+ /// !tbaa.struct metadata formation. Return false for an unsupported type.
+ bool CollectFields(uint64_t BaseOffset,
+ QualType Ty,
+ SmallVectorImpl<llvm::MDBuilder::TBAAStructField> &Fields,
+ bool MayAlias);
+
public:
CodeGenTBAA(ASTContext &Ctx, llvm::LLVMContext &VMContext,
const CodeGenOptions &CGO,
@@ -74,6 +85,10 @@ public:
/// getTBAAInfoForVTablePtr - Get the TBAA MDNode to be used for a
/// dereference of a vtable pointer.
llvm::MDNode *getTBAAInfoForVTablePtr();
+
+ /// getTBAAStructInfo - Get the TBAAStruct MDNode to be used for a memcpy of
+ /// the given type.
+ llvm::MDNode *getTBAAStructInfo(QualType QTy);
};
} // end namespace CodeGen
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index 9a78dae..3c6c5c9 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -23,13 +23,13 @@
#include "clang/AST/RecordLayout.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace clang;
using namespace CodeGen;
CodeGenTypes::CodeGenTypes(CodeGenModule &CGM)
: Context(CGM.getContext()), Target(Context.getTargetInfo()),
- TheModule(CGM.getModule()), TheTargetData(CGM.getTargetData()),
+ TheModule(CGM.getModule()), TheDataLayout(CGM.getDataLayout()),
TheABIInfo(CGM.getTargetCodeGenInfo().getABIInfo()),
TheCXXABI(CGM.getCXXABI()),
CodeGenOpts(CGM.getCodeGenOpts()), CGM(CGM) {
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index 3c29d2d..0519911 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -23,7 +23,7 @@
namespace llvm {
class FunctionType;
class Module;
- class TargetData;
+ class DataLayout;
class Type;
class LLVMContext;
class StructType;
@@ -62,7 +62,7 @@ class CodeGenTypes {
ASTContext &Context;
const TargetInfo &Target;
llvm::Module &TheModule;
- const llvm::TargetData &TheTargetData;
+ const llvm::DataLayout &TheDataLayout;
const ABIInfo &TheABIInfo;
CGCXXABI &TheCXXABI;
const CodeGenOptions &CodeGenOpts;
@@ -108,7 +108,7 @@ public:
CodeGenTypes(CodeGenModule &CGM);
~CodeGenTypes();
- const llvm::TargetData &getTargetData() const { return TheTargetData; }
+ const llvm::DataLayout &getDataLayout() const { return TheDataLayout; }
const TargetInfo &getTarget() const { return Target; }
ASTContext &getContext() const { return Context; }
const ABIInfo &getABIInfo() const { return TheABIInfo; }
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index 0b7ce36..245150c 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -23,11 +23,11 @@
#include "CGVTables.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
-#include <clang/AST/Mangle.h>
-#include <clang/AST/Type.h>
-#include <llvm/Intrinsics.h>
-#include <llvm/Target/TargetData.h>
-#include <llvm/Value.h>
+#include "clang/AST/Mangle.h"
+#include "clang/AST/Type.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/DataLayout.h"
+#include "llvm/Value.h"
using namespace clang;
using namespace CodeGen;
@@ -92,6 +92,10 @@ public:
llvm::Value *Addr,
const MemberPointerType *MPT);
+ llvm::Value *adjustToCompleteObject(CodeGenFunction &CGF,
+ llvm::Value *ptr,
+ QualType type);
+
void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
CXXCtorType T,
CanQualType &ResTy,
@@ -109,6 +113,7 @@ public:
void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
StringRef GetPureVirtualCallName() { return "__cxa_pure_virtual"; }
+ StringRef GetDeletedVirtualCallName() { return "__cxa_deleted_virtual"; }
CharUnits getArrayCookieSizeImpl(QualType elementType);
llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
@@ -299,7 +304,7 @@ llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
CGBuilderTy &Builder = CGF.Builder;
- unsigned AS = cast<llvm::PointerType>(Base->getType())->getAddressSpace();
+ unsigned AS = Base->getType()->getPointerAddressSpace();
// Cast to char*.
Base = Builder.CreateBitCast(Base, Builder.getInt8Ty()->getPointerTo(AS));
@@ -677,6 +682,25 @@ bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
return MPT->getPointeeType()->isFunctionType();
}
+/// The Itanium ABI always places an offset to the complete object
+/// at entry -2 in the vtable.
+llvm::Value *ItaniumCXXABI::adjustToCompleteObject(CodeGenFunction &CGF,
+ llvm::Value *ptr,
+ QualType type) {
+ // Grab the vtable pointer as an intptr_t*.
+ llvm::Value *vtable = CGF.GetVTablePtr(ptr, CGF.IntPtrTy->getPointerTo());
+
+ // Track back to entry -2 and pull out the offset there.
+ llvm::Value *offsetPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(vtable, -2, "complete-offset.ptr");
+ llvm::LoadInst *offset = CGF.Builder.CreateLoad(offsetPtr);
+ offset->setAlignment(CGF.PointerAlignInBytes);
+
+ // Apply the offset.
+ ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
+ return CGF.Builder.CreateInBoundsGEP(ptr, offset);
+}
+
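The entry -2 slot read here holds the offset-to-top value that also powers
dynamic_cast<void*>, so the adjustment is easy to observe from plain C++
(a small demonstration, independent of this patch):

#include <cstdio>

struct A { virtual ~A() {} int x; };
struct B { virtual ~B() {} int y; };
struct C : A, B {};

int main() {
  C c;
  B *b = &c; // points at the B subobject, past the start of c
  // The runtime recovers the complete object through the vtable's
  // offset-to-top slot, the same value adjustToCompleteObject loads.
  void *complete = dynamic_cast<void *>(b);
  std::printf("%d\n", complete == static_cast<void *>(&c)); // prints 1
  return 0;
}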
/// The generic ABI passes 'this', plus a VTT if it's initializing a
/// base subobject.
void ItaniumCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
@@ -810,7 +834,7 @@ llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
QualType ElementType) {
assert(requiresArrayCookie(expr));
- unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
+ unsigned AS = NewPtr->getType()->getPointerAddressSpace();
ASTContext &Ctx = getContext();
QualType SizeTy = Ctx.getSizeType();
@@ -852,7 +876,7 @@ llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
CGF.Builder.CreateConstInBoundsGEP1_64(numElementsPtr,
numElementsOffset.getQuantity());
- unsigned AS = cast<llvm::PointerType>(allocPtr->getType())->getAddressSpace();
+ unsigned AS = allocPtr->getType()->getPointerAddressSpace();
numElementsPtr =
CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
return CGF.Builder.CreateLoad(numElementsPtr);
@@ -878,7 +902,7 @@ llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
// NewPtr is a char*.
- unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
+ unsigned AS = NewPtr->getType()->getPointerAddressSpace();
ASTContext &Ctx = getContext();
CharUnits SizeSize = Ctx.getTypeSizeInChars(Ctx.getSizeType());
@@ -913,7 +937,7 @@ llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
llvm::Value *numElementsPtr
= CGF.Builder.CreateConstInBoundsGEP1_64(allocPtr, CGF.SizeSizeInBytes);
- unsigned AS = cast<llvm::PointerType>(allocPtr->getType())->getAddressSpace();
+ unsigned AS = allocPtr->getType()->getPointerAddressSpace();
numElementsPtr =
CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
return CGF.Builder.CreateLoad(numElementsPtr);
@@ -927,9 +951,9 @@ static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
GuardPtrTy, /*isVarArg=*/false);
-
return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire",
- llvm::Attribute::NoUnwind);
+ llvm::Attributes::get(CGM.getLLVMContext(),
+ llvm::Attributes::NoUnwind));
}
static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
@@ -937,9 +961,9 @@ static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
// void __cxa_guard_release(__guard *guard_object);
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
-
return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release",
- llvm::Attribute::NoUnwind);
+ llvm::Attributes::get(CGM.getLLVMContext(),
+ llvm::Attributes::NoUnwind));
}
static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
@@ -947,9 +971,9 @@ static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
// void __cxa_guard_abort(__guard *guard_object);
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
-
return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort",
- llvm::Attribute::NoUnwind);
+ llvm::Attributes::get(CGM.getLLVMContext(),
+ llvm::Attributes::NoUnwind));
}
namespace {
@@ -1149,7 +1173,7 @@ void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
// In Apple kexts, we want to add a global destructor entry.
// FIXME: shouldn't this be guarded by some variable?
- if (CGM.getContext().getLangOpts().AppleKext) {
+ if (CGM.getLangOpts().AppleKext) {
// Generate a global destructor entry.
return CGM.AddCXXDtorEntry(dtor, addr);
}
diff --git a/lib/CodeGen/MicrosoftCXXABI.cpp b/lib/CodeGen/MicrosoftCXXABI.cpp
index 6a2925b..8d205c3 100644
--- a/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -29,14 +29,18 @@ public:
MicrosoftCXXABI(CodeGenModule &CGM) : CGCXXABI(CGM) {}
StringRef GetPureVirtualCallName() { return "_purecall"; }
+ // No known support for deleted functions in MSVC yet, so this choice is
+ // arbitrary.
+ StringRef GetDeletedVirtualCallName() { return "_purecall"; }
+
+ llvm::Value *adjustToCompleteObject(CodeGenFunction &CGF,
+ llvm::Value *ptr,
+ QualType type);
void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
CXXCtorType Type,
CanQualType &ResTy,
- SmallVectorImpl<CanQualType> &ArgTys) {
- // 'this' is already in place
- // TODO: 'for base' flag
- }
+ SmallVectorImpl<CanQualType> &ArgTys);
void BuildDestructorSignature(const CXXDestructorDecl *Ctor,
CXXDtorType Type,
@@ -48,15 +52,9 @@ public:
void BuildInstanceFunctionParams(CodeGenFunction &CGF,
QualType &ResTy,
- FunctionArgList &Params) {
- BuildThisParam(CGF, Params);
- // TODO: 'for base' flag
- }
+ FunctionArgList &Params);
- void EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
- EmitThisParam(CGF);
- // TODO: 'for base' flag
- }
+ void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
llvm::GlobalVariable *DeclPtr,
@@ -99,10 +97,49 @@ public:
llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
llvm::Value *allocPtr,
CharUnits cookieSize);
+ static bool needThisReturn(GlobalDecl GD);
};
}
+llvm::Value *MicrosoftCXXABI::adjustToCompleteObject(CodeGenFunction &CGF,
+ llvm::Value *ptr,
+ QualType type) {
+ // FIXME: implement
+ return ptr;
+}
+
+bool MicrosoftCXXABI::needThisReturn(GlobalDecl GD) {
+ const CXXMethodDecl* MD = cast<CXXMethodDecl>(GD.getDecl());
+ return isa<CXXConstructorDecl>(MD);
+}
+
+void MicrosoftCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType Type,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys) {
+ // 'this' is already in place
+ // TODO: 'for base' flag
+ // Ctor returns this ptr
+ ResTy = ArgTys[0];
+}
+
+void MicrosoftCXXABI::BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params) {
+ BuildThisParam(CGF, Params);
+ if (needThisReturn(CGF.CurGD)) {
+ ResTy = Params[0]->getType();
+ }
+}
+
+void MicrosoftCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
+ EmitThisParam(CGF);
+ if (needThisReturn(CGF.CurGD)) {
+ CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
+ }
+}
+
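A hand-written analogue of the 'this'-return convention these definitions
implement; S_ctor below is a made-up stand-in for compiler-generated code,
sketched only to show the shape of the lowering:

struct S { int x; };

// Hypothetical lowered form of a constructor under this convention: 'this'
// comes in as the first argument and is handed back as the result, which is
// what storing getThisValue(CGF) into CGF.ReturnValue arranges.
static S *S_ctor(S *This) {
  This->x = 0; // constructor body
  return This; // 'this'-return
}

int main() {
  S s;
  S *p = S_ctor(&s); // the call site may keep using the returned register
  return p->x;
}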
bool MicrosoftCXXABI::requiresArrayCookie(const CXXDeleteExpr *expr,
QualType elementType) {
// Microsoft seems to completely ignore the possibility of a
@@ -127,7 +164,7 @@ CharUnits MicrosoftCXXABI::getArrayCookieSizeImpl(QualType type) {
llvm::Value *MicrosoftCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
llvm::Value *allocPtr,
CharUnits cookieSize) {
- unsigned AS = cast<llvm::PointerType>(allocPtr->getType())->getAddressSpace();
+ unsigned AS = allocPtr->getType()->getPointerAddressSpace();
llvm::Value *numElementsPtr =
CGF.Builder.CreateBitCast(allocPtr, CGF.SizeTy->getPointerTo(AS));
return CGF.Builder.CreateLoad(numElementsPtr);
@@ -147,7 +184,7 @@ llvm::Value* MicrosoftCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *cookiePtr = newPtr;
// Write the number of elements into the appropriate slot.
- unsigned AS = cast<llvm::PointerType>(newPtr->getType())->getAddressSpace();
+ unsigned AS = newPtr->getType()->getPointerAddressSpace();
llvm::Value *numElementsPtr
= CGF.Builder.CreateBitCast(cookiePtr, CGF.SizeTy->getPointerTo(AS));
CGF.Builder.CreateStore(numElements, numElementsPtr);
diff --git a/lib/CodeGen/ModuleBuilder.cpp b/lib/CodeGen/ModuleBuilder.cpp
index ea2389e..0125559 100644
--- a/lib/CodeGen/ModuleBuilder.cpp
+++ b/lib/CodeGen/ModuleBuilder.cpp
@@ -21,14 +21,14 @@
#include "clang/Basic/TargetInfo.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/ADT/OwningPtr.h"
using namespace clang;
namespace {
class CodeGeneratorImpl : public CodeGenerator {
DiagnosticsEngine &Diags;
- OwningPtr<const llvm::TargetData> TD;
+ OwningPtr<const llvm::DataLayout> TD;
ASTContext *Ctx;
const CodeGenOptions CodeGenOpts; // Intentionally copied in.
protected:
@@ -54,7 +54,7 @@ namespace {
M->setTargetTriple(Ctx->getTargetInfo().getTriple().getTriple());
M->setDataLayout(Ctx->getTargetInfo().getTargetDescription());
- TD.reset(new llvm::TargetData(Ctx->getTargetInfo().getTargetDescription()));
+ TD.reset(new llvm::DataLayout(Ctx->getTargetInfo().getTargetDescription()));
Builder.reset(new CodeGen::CodeGenModule(Context, CodeGenOpts,
*M, *TD, Diags));
}
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 9c23ed9..ffff0d0 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -18,7 +18,7 @@
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Type.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -51,8 +51,8 @@ llvm::LLVMContext &ABIInfo::getVMContext() const {
return CGT.getLLVMContext();
}
-const llvm::TargetData &ABIInfo::getTargetData() const {
- return CGT.getTargetData();
+const llvm::DataLayout &ABIInfo::getDataLayout() const {
+ return CGT.getDataLayout();
}
@@ -389,6 +389,90 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
+//===----------------------------------------------------------------------===//
+// le32/PNaCl bitcode ABI Implementation
+//===----------------------------------------------------------------------===//
+
+class PNaClABIInfo : public ABIInfo {
+ public:
+ PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
+ public:
+ PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
+};
+
+void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+ unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() : 0;
+
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type, FreeRegs);
+}
+
+llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ return 0;
+}
+
+ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty,
+ unsigned &FreeRegs) const {
+ if (isAggregateTypeForABI(Ty)) {
+ // Records with non-trivial destructors/constructors should not be passed
+ // by value.
+ FreeRegs = 0;
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ ABIArgInfo BaseInfo = (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+
+ // Regparm regs hold 32 bits.
+ unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+ if (SizeInRegs == 0) return BaseInfo;
+ if (SizeInRegs > FreeRegs) {
+ FreeRegs = 0;
+ return BaseInfo;
+ }
+ FreeRegs -= SizeInRegs;
+ return BaseInfo.isDirect() ?
+ ABIArgInfo::getDirectInReg(BaseInfo.getCoerceToType()) :
+ ABIArgInfo::getExtendInReg(BaseInfo.getCoerceToType());
+}
+
+ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy))
+ return ABIArgInfo::getIndirect(0);
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
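The regparm accounting above reduces to ceil(bits/32) register slots, with
FreeRegs zeroed once an argument no longer fits. A standalone trace under an
assumed regparm(3) budget (values illustrative, not generated output):

#include <cstdio>

static const char *classify(unsigned SizeBits, unsigned &FreeRegs) {
  unsigned SizeInRegs = (SizeBits + 31) / 32; // regparm regs hold 32 bits
  if (SizeInRegs == 0) return "direct";
  if (SizeInRegs > FreeRegs) { FreeRegs = 0; return "direct (stack)"; }
  FreeRegs -= SizeInRegs;
  return "direct in-reg";
}

int main() {
  unsigned FreeRegs = 3;                       // regparm(3)
  std::printf("%s\n", classify(32, FreeRegs)); // i32: in-reg, 2 left
  std::printf("%s\n", classify(64, FreeRegs)); // i64: in-reg, 0 left
  std::printf("%s\n", classify(32, FreeRegs)); // i32: stack
  return 0;
}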
/// UseX86_MMXType - Return true if this is an MMX type that should use the
/// special x86_mmx type.
bool UseX86_MMXType(llvm::Type *IRType) {
@@ -435,7 +519,8 @@ class X86_32ABIInfo : public ABIInfo {
/// getIndirectResult - Give a source type \arg Ty, return a suitable result
/// such that the argument will be passed in memory.
- ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const;
+ ABIArgInfo getIndirectResult(QualType Ty, bool ByVal,
+ unsigned &FreeRegs) const;
/// \brief Return the alignment to use for the given type on the stack.
unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
@@ -443,9 +528,10 @@ class X86_32ABIInfo : public ABIInfo {
Class classify(QualType Ty) const;
ABIArgInfo classifyReturnType(QualType RetTy,
unsigned callingConvention) const;
- ABIArgInfo classifyArgumentTypeWithReg(QualType RetTy,
- unsigned &FreeRegs) const;
- ABIArgInfo classifyArgumentType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs,
+ bool IsFastCall) const;
+ bool shouldUseInReg(QualType Ty, unsigned &FreeRegs,
+ bool IsFastCall, bool &NeedsPadding) const;
public:
@@ -682,9 +768,15 @@ unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
return MinABIStackAlignInBytes;
}
-ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
- if (!ByVal)
+ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
+ unsigned &FreeRegs) const {
+ if (!ByVal) {
+ if (FreeRegs) {
+ --FreeRegs; // Non-byval indirects just use one pointer.
+ return ABIArgInfo::getIndirectInReg(0, false);
+ }
return ABIArgInfo::getIndirect(0, false);
+ }
// Compute the byval alignment.
unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
@@ -714,45 +806,51 @@ X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
return Integer;
}
-ABIArgInfo
-X86_32ABIInfo::classifyArgumentTypeWithReg(QualType Ty,
- unsigned &FreeRegs) const {
- // Common case first.
- if (FreeRegs == 0)
- return classifyArgumentType(Ty);
-
+bool X86_32ABIInfo::shouldUseInReg(QualType Ty, unsigned &FreeRegs,
+ bool IsFastCall, bool &NeedsPadding) const {
+ NeedsPadding = false;
Class C = classify(Ty);
if (C == Float)
- return classifyArgumentType(Ty);
+ return false;
+
+ unsigned Size = getContext().getTypeSize(Ty);
+ unsigned SizeInRegs = (Size + 31) / 32;
- unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
if (SizeInRegs == 0)
- return classifyArgumentType(Ty);
+ return false;
if (SizeInRegs > FreeRegs) {
FreeRegs = 0;
- return classifyArgumentType(Ty);
+ return false;
}
- assert(SizeInRegs >= 1 && SizeInRegs <= 3);
+
FreeRegs -= SizeInRegs;
- // If it is a simple scalar, keep the type so that we produce a cleaner IR.
- ABIArgInfo Foo = classifyArgumentType(Ty);
- if (Foo.isDirect() && !Foo.getDirectOffset() && !Foo.getPaddingType())
- return ABIArgInfo::getDirectInReg(Foo.getCoerceToType());
- if (Foo.isExtend())
- return ABIArgInfo::getExtendInReg(Foo.getCoerceToType());
+ if (IsFastCall) {
+ if (Size > 32)
+ return false;
+
+ if (Ty->isIntegralOrEnumerationType())
+ return true;
+
+ if (Ty->isPointerType())
+ return true;
+
+ if (Ty->isReferenceType())
+ return true;
+
+ if (FreeRegs)
+ NeedsPadding = true;
- llvm::LLVMContext &LLVMContext = getVMContext();
- llvm::Type *Int32 = llvm::Type::getInt32Ty(LLVMContext);
- SmallVector<llvm::Type*, 3> Elements;
- for (unsigned I = 0; I < SizeInRegs; ++I)
- Elements.push_back(Int32);
- llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
- return ABIArgInfo::getDirectInReg(Result);
+ return false;
+ }
+
+ return true;
}
-ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
+ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
+ unsigned &FreeRegs,
+ bool IsFastCall) const {
// FIXME: Set alignment on indirect arguments.
if (isAggregateTypeForABI(Ty)) {
// Structures with flexible arrays are always indirect.
@@ -760,25 +858,38 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
// Structures with either a non-trivial destructor or a non-trivial
// copy constructor are always indirect.
if (hasNonTrivialDestructorOrCopyConstructor(RT))
- return getIndirectResult(Ty, /*ByVal=*/false);
+ return getIndirectResult(Ty, false, FreeRegs);
if (RT->getDecl()->hasFlexibleArrayMember())
- return getIndirectResult(Ty);
+ return getIndirectResult(Ty, true, FreeRegs);
}
// Ignore empty structs/unions.
if (isEmptyRecord(getContext(), Ty, true))
return ABIArgInfo::getIgnore();
+ llvm::LLVMContext &LLVMContext = getVMContext();
+ llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
+ bool NeedsPadding;
+ if (shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding)) {
+ unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+ SmallVector<llvm::Type*, 3> Elements;
+ for (unsigned I = 0; I < SizeInRegs; ++I)
+ Elements.push_back(Int32);
+ llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
+ return ABIArgInfo::getDirectInReg(Result);
+ }
+ llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : 0;
+
// Expand small (<= 128-bit) record types when we know that the stack layout
// of those arguments will match the struct. This is important because the
// LLVM backend isn't smart enough to remove byval, which inhibits many
// optimizations.
if (getContext().getTypeSize(Ty) <= 4*32 &&
canExpandIndirectArgument(Ty, getContext()))
- return ABIArgInfo::getExpand();
+ return ABIArgInfo::getExpandWithPadding(IsFastCall, PaddingType);
- return getIndirectResult(Ty);
+ return getIndirectResult(Ty, true, FreeRegs);
}
if (const VectorType *VT = Ty->getAs<VectorType>()) {
@@ -809,16 +920,32 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- return (Ty->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ bool NeedsPadding;
+ bool InReg = shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding);
+
+ if (Ty->isPromotableIntegerType()) {
+ if (InReg)
+ return ABIArgInfo::getExtendInReg();
+ return ABIArgInfo::getExtend();
+ }
+ if (InReg)
+ return ABIArgInfo::getDirectInReg();
+ return ABIArgInfo::getDirect();
}
void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
FI.getCallingConvention());
- unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() :
- DefaultNumRegisterParameters;
+ unsigned CC = FI.getCallingConvention();
+ bool IsFastCall = CC == llvm::CallingConv::X86_FastCall;
+ unsigned FreeRegs;
+ if (IsFastCall)
+ FreeRegs = 2;
+ else if (FI.getHasRegParm())
+ FreeRegs = FI.getRegParm();
+ else
+ FreeRegs = DefaultNumRegisterParameters;
// If the return value is indirect, then the hidden argument is consuming one
// integer register.
@@ -832,7 +959,7 @@ void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
- it->info = classifyArgumentTypeWithReg(it->type, FreeRegs);
+ it->info = classifyArgumentType(it->type, FreeRegs, IsFastCall);
}
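
A sketch of the new FreeRegs bookkeeping for a fastcall signature such as
void __fastcall f(int a, long long b, int c). The helper restates only the
size checks; the type-eligibility and padding logic of shouldUseInReg is
omitted, and note the quirk that an overflowing argument zeroes the
remaining registers:

#include <cstdio>

static bool useInReg(unsigned SizeBits, unsigned &FreeRegs) {
  unsigned SizeInRegs = (SizeBits + 31) / 32;
  if (SizeInRegs == 0)
    return false;
  if (SizeInRegs > FreeRegs) {
    FreeRegs = 0;    // an overflowing argument poisons the remaining regs
    return false;
  }
  FreeRegs -= SizeInRegs;
  if (SizeBits > 32) // fastcall: wide arguments never go in a register
    return false;
  return true;       // assume an eligible integer/pointer type
}

int main() {
  unsigned FreeRegs = 2;                          // fastcall: ECX and EDX
  std::printf("a: %d\n", useInReg(32, FreeRegs)); // 1 -> register
  std::printf("b: %d\n", useInReg(64, FreeRegs)); // 0 -> stack, regs zeroed
  std::printf("c: %d\n", useInReg(32, FreeRegs)); // 0 -> stack
  return 0;
}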
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@@ -884,7 +1011,10 @@ void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
llvm::Function *Fn = cast<llvm::Function>(GV);
// Now add the 'alignstack' attribute with a value of 16.
- Fn->addFnAttr(llvm::Attribute::constructStackAlignmentFromInt(16));
+ llvm::AttrBuilder B;
+ B.addStackAlignmentAttr(16);
+ Fn->addAttribute(llvm::AttrListPtr::FunctionIndex,
+ llvm::Attributes::get(CGM.getLLVMContext(), B));
}
}
}
@@ -1030,10 +1160,15 @@ class X86_64ABIInfo : public ABIInfo {
}
bool HasAVX;
+ // Some ABIs (e.g. X32 ABI and Native Client OS) use 32-bit pointers on
+ // 64-bit hardware.
+ bool Has64BitPointers;
public:
X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
- ABIInfo(CGT), HasAVX(hasavx) {}
+ ABIInfo(CGT), HasAVX(hasavx),
+ Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
+ }
bool isPassedUsingAVXType(QualType type) const {
unsigned neededInt, neededSSE;
@@ -1070,7 +1205,7 @@ public:
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
- : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}
+ : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}
const X86_64ABIInfo &getABIInfo() const {
return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
@@ -1243,7 +1378,10 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
Hi = Integer;
} else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
Current = Integer;
- } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
+ } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
+ (k == BuiltinType::LongDouble &&
+ getContext().getTargetInfo().getTriple().getOS() ==
+ llvm::Triple::NativeClient)) {
Current = SSE;
} else if (k == BuiltinType::LongDouble) {
Lo = X87;
@@ -1266,7 +1404,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
}
if (Ty->isMemberPointerType()) {
- if (Ty->isMemberFunctionPointerType())
+ if (Ty->isMemberFunctionPointerType() && Has64BitPointers)
Lo = Hi = Integer;
else
Current = Integer;
@@ -1329,7 +1467,10 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
Lo = Hi = Integer;
} else if (ET == getContext().FloatTy)
Current = SSE;
- else if (ET == getContext().DoubleTy)
+ else if (ET == getContext().DoubleTy ||
+ (ET == getContext().LongDoubleTy &&
+ getContext().getTargetInfo().getTriple().getOS() ==
+ llvm::Triple::NativeClient))
Lo = Hi = SSE;
else if (ET == getContext().LongDoubleTy)
Current = ComplexX87;
@@ -1708,7 +1849,7 @@ static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
/// float at offset 4. It is conservatively correct for this routine to return
/// false.
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
- const llvm::TargetData &TD) {
+ const llvm::DataLayout &TD) {
// Base case if we find a float.
if (IROffset == 0 && IRType->isFloatTy())
return true;
@@ -1748,8 +1889,8 @@ GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
// We want to pass as <2 x float> if the LLVM IR type contains a float at
// offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
// case.
- if (ContainsFloatAtOffset(IRType, IROffset, getTargetData()) &&
- ContainsFloatAtOffset(IRType, IROffset+4, getTargetData()))
+ if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
+ ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
return llvm::Type::getDoubleTy(getVMContext());
@@ -1777,7 +1918,8 @@ GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
// returning an 8-byte unit starting with it. See if we can safely use it.
if (IROffset == 0) {
// Pointers and int64's always fill the 8-byte unit.
- if (isa<llvm::PointerType>(IRType) || IRType->isIntegerTy(64))
+ if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
+ IRType->isIntegerTy(64))
return IRType;
// If we have a 1/2/4-byte integer, we can use it only if the rest of the
@@ -1787,8 +1929,10 @@ GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
// have to do this analysis on the source type because we can't depend on
// unions being lowered a specific way etc.
if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
- IRType->isIntegerTy(32)) {
- unsigned BitWidth = cast<llvm::IntegerType>(IRType)->getBitWidth();
+ IRType->isIntegerTy(32) ||
+ (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
+ unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
+ cast<llvm::IntegerType>(IRType)->getBitWidth();
if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
SourceOffset*8+64, getContext()))
@@ -1798,7 +1942,7 @@ GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
// If this is a struct, recurse into the field at the specified offset.
- const llvm::StructLayout *SL = getTargetData().getStructLayout(STy);
+ const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
if (IROffset < SL->getSizeInBytes()) {
unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
IROffset -= SL->getElementOffset(FieldIdx);
@@ -1810,7 +1954,7 @@ GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
llvm::Type *EltTy = ATy->getElementType();
- unsigned EltSize = getTargetData().getTypeAllocSize(EltTy);
+ unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
unsigned EltOffset = IROffset/EltSize*EltSize;
return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
SourceOffset);
@@ -1837,14 +1981,14 @@ GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
/// return {i32*, float}.
static llvm::Type *
GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
- const llvm::TargetData &TD) {
+ const llvm::DataLayout &TD) {
// In order to correctly satisfy the ABI, we need the high part to start
// at offset 8. If the high and low parts we inferred are both 4-byte types
// (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
// the second element at offset 8. Check for this:
unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
unsigned HiAlign = TD.getABITypeAlignment(Hi);
- unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign);
+ unsigned HiStart = llvm::DataLayout::RoundUpAlignment(LoSize, HiAlign);
assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
// To handle this, we have to increase the size of the low part so that the
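
A worked instance of the HiStart check: with Lo = i32 and Hi = i32, the high
element would land at offset 4 rather than the required 8, so the low part
must be widened. Plain arithmetic standing in for the DataLayout queries:

#include <cstdio>

static unsigned roundUpAlignment(unsigned Value, unsigned Align) {
  return (Value + Align - 1) / Align * Align;
}

int main() {
  unsigned LoSize = 4, HiAlign = 4; // Lo = i32, Hi = i32
  std::printf("HiStart = %u\n",
              roundUpAlignment(LoSize, HiAlign)); // 4: padding needed
  LoSize = 8;                       // Lo widened to i64 (or double)
  std::printf("HiStart = %u\n",
              roundUpAlignment(LoSize, HiAlign)); // 8: already correct
  return 0;
}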
@@ -1996,7 +2140,7 @@ classifyReturnType(QualType RetTy) const {
// known to pass in the high eightbyte of the result. We do this by forming a
// first class struct aggregate with the high and low part: {low, high}
if (HighPart)
- ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
+ ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
return ABIArgInfo::getDirect(ResType);
}
@@ -2122,7 +2266,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(
// known to pass in the high eightbyte of the result. We do this by forming a
// first class struct aggregate with the high and low part: {low, high}
if (HighPart)
- ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
+ ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
return ABIArgInfo::getDirect(ResType);
}
@@ -2435,6 +2579,43 @@ llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
return AddrTyped;
}
+namespace {
+
+class NaClX86_64ABIInfo : public ABIInfo {
+ public:
+ NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
+ : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {}
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+ private:
+ PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
+ X86_64ABIInfo NInfo; // Used for everything else.
+};
+
+class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
+ public:
+ NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
+ : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)) {}
+};
+
+}
+
+void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ if (FI.getASTCallingConvention() == CC_PnaclCall)
+ PInfo.computeInfo(FI);
+ else
+ NInfo.computeInfo(FI);
+}
+
+llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // Always use the native convention; calling pnacl-style varargs functions
+ // is unsupported.
+ return NInfo.EmitVAArg(VAListAddr, Ty, CGF);
+}
+
+
// PowerPC-32
namespace {
@@ -2497,6 +2678,62 @@ PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
// PowerPC-64
namespace {
+/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
+class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
+
+public:
+ PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+ bool isPromotableTypeForABI(QualType Ty) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+ // TODO: We can add more logic to computeInfo to improve performance.
+ // Example: For aggregate arguments that fit in a register, we could
+ // use getDirectInReg (as is done below for structs containing a single
+ // floating-point value) to avoid pushing them to memory on function
+ // entry. This would require changing the logic in PPCISelLowering
+ // when lowering the parameters in the caller and args in the callee.
+ virtual void computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it) {
+ // We rely on the default argument classification for the most part.
+ // One exception: An aggregate containing a single floating-point
+ // item must be passed in a register if one is available.
+ const Type *T = isSingleElementStruct(it->type, getContext());
+ if (T) {
+ const BuiltinType *BT = T->getAs<BuiltinType>();
+ if (BT && BT->isFloatingPoint()) {
+ QualType QT(T, 0);
+ it->info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
+ continue;
+ }
+ }
+ it->info = classifyArgumentType(it->type);
+ }
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr,
+ QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ // This is recovered from gcc output.
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const;
+};
+
class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
@@ -2512,9 +2749,94 @@ public:
}
+// Return true if the ABI requires Ty to be passed sign- or zero-
+// extended to 64 bits.
bool
-PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const {
+PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Promotable integer types are required to be promoted by the ABI.
+ if (Ty->isPromotableIntegerType())
+ return true;
+
+ // In addition to the usual promotable integer types, we also need to
+ // extend all 32-bit types, since the ABI requires promotion to 64 bits.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
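At the language level the promotion means every 32-bit integer argument
travels as a correctly extended doubleword; a host-side sketch of the
extension (the mapping to the IR-level signext/zeroext attributes is assumed
here, not shown in this hunk):

#include <cstdint>
#include <cstdio>

int main() {
  int32_t Narrow = -1;
  int64_t Passed = Narrow;    // sign-extended to a full doubleword
  uint32_t UNarrow = 0x80000000u;
  uint64_t UPassed = UNarrow; // zero-extended to a full doubleword
  std::printf("%lld %llu\n",
              (long long)Passed, (unsigned long long)UPassed);
  return 0; // prints -1 2147483648
}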
+ABIArgInfo
+PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
+ if (isAggregateTypeForABI(Ty)) {
+ // Records with non-trivial destructors/constructors should not be passed
+ // by value.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ return (isPromotableTypeForABI(Ty) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo
+PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy))
+ return ABIArgInfo::getIndirect(0);
+
+ return (isPromotableTypeForABI(RetTy) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
+llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
+ QualType Ty,
+ CodeGenFunction &CGF) const {
+ llvm::Type *BP = CGF.Int8PtrTy;
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+
+ // Update the va_list pointer.
+ unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8;
+ unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ // If the argument is smaller than 8 bytes, it is right-adjusted in
+ // its doubleword slot. Adjust the pointer to pick it up from the
+ // correct offset.
+ if (SizeInBytes < 8) {
+ llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
+ AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
+ Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
+ }
+
+ llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ return Builder.CreateBitCast(Addr, PTy);
+}
+
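The right-adjustment arithmetic above, isolated: on big-endian PPC64 a value
narrower than a doubleword sits at the high end of its 8-byte slot, so the
load address is bumped by 8 - SizeInBytes:

#include <cstdio>

int main() {
  static const unsigned Sizes[] = { 1, 2, 4, 8 };
  for (unsigned I = 0; I != 4; ++I) {
    unsigned SizeInBytes = Sizes[I];
    unsigned Adjust = SizeInBytes < 8 ? 8 - SizeInBytes : 0;
    std::printf("size %u: load at slot + %u\n", SizeInBytes, Adjust);
  }
  return 0;
}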
+static bool
+PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) {
// This is calculated from the LLVM and GCC tables and verified
// against gcc output. AFAIK all ABIs use the same encoding.
@@ -2553,6 +2875,21 @@ PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
return false;
}
+bool
+PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+
+ return PPC64_initDwarfEHRegSizeTable(CGF, Address);
+}
+
+bool
+PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+
+ return PPC64_initDwarfEHRegSizeTable(CGF, Address);
+}
+
//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//
@@ -2576,14 +2913,18 @@ public:
bool isEABI() const {
StringRef Env =
getContext().getTargetInfo().getTriple().getEnvironmentName();
- return (Env == "gnueabi" || Env == "eabi" || Env == "androideabi");
+ return (Env == "gnueabi" || Env == "eabi" ||
+ Env == "android" || Env == "androideabi");
}
private:
ABIKind getABIKind() const { return Kind; }
ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, int *VFPRegs,
+ unsigned &AllocatedVFP,
+ bool &IsHA) const;
+ bool isIllegalVectorType(QualType Ty) const;
virtual void computeInfo(CGFunctionInfo &FI) const;
@@ -2626,10 +2967,33 @@ public:
}
void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ // To correctly handle Homogeneous Aggregate, we need to keep track of the
+ // VFP registers allocated so far.
+ // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
+ // VFP registers of the appropriate type unallocated then the argument is
+ // allocated to the lowest-numbered sequence of such registers.
+ // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
+ // unallocated are marked as unavailable.
+ unsigned AllocatedVFP = 0;
+ int VFPRegs[16] = { 0 };
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type);
+ it != ie; ++it) {
+ unsigned PreAllocation = AllocatedVFP;
+ bool IsHA = false;
+ // 6.1.2.3 There is one VFP co-processor register class using registers
+ // s0-s15 (d0-d7) for passing arguments.
+ const unsigned NumVFPs = 16;
+ it->info = classifyArgumentType(it->type, VFPRegs, AllocatedVFP, IsHA);
+ // If we do not have enough VFP registers for the HA, any VFP registers
+ // that are unallocated are marked as unavailable. To achieve this, we add
+ // padding of (NumVFPs - PreAllocation) floats.
+ if (IsHA && AllocatedVFP > NumVFPs && PreAllocation < NumVFPs) {
+ llvm::Type *PaddingTy = llvm::ArrayType::get(
+ llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocation);
+ it->info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
+ }
+ }
// Always honor user-specified calling convention.
if (FI.getCallingConvention() != llvm::CallingConv::C)
@@ -2637,7 +3001,9 @@ void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
// Calling convention as default by an ABI.
llvm::CallingConv::ID DefaultCC;
- if (isEABI())
+ if (getContext().getTargetInfo().getTriple().getEnvironmentName() == "gnueabihf")
+ DefaultCC = llvm::CallingConv::ARM_AAPCS_VFP;
+ else if (isEABI())
DefaultCC = llvm::CallingConv::ARM_AAPCS;
else
DefaultCC = llvm::CallingConv::ARM_APCS;
@@ -2729,7 +3095,88 @@ static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
return (Members > 0 && Members <= 4);
}
-ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
+/// markAllocatedVFPs - update VFPRegs according to the alignment and
+/// number of VFP registers (unit is S register) requested.
+static void markAllocatedVFPs(int *VFPRegs, unsigned &AllocatedVFP,
+ unsigned Alignment,
+ unsigned NumRequired) {
+ // Early Exit.
+ if (AllocatedVFP >= 16)
+ return;
+ // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
+ // VFP registers of the appropriate type unallocated then the argument is
+ // allocated to the lowest-numbered sequence of such registers.
+ for (unsigned I = 0; I < 16; I += Alignment) {
+ bool FoundSlot = true;
+ for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
+ if (J >= 16 || VFPRegs[J]) {
+ FoundSlot = false;
+ break;
+ }
+ if (FoundSlot) {
+ for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
+ VFPRegs[J] = 1;
+ AllocatedVFP += NumRequired;
+ return;
+ }
+ }
+ // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
+ // unallocated are marked as unavailable.
+ for (unsigned I = 0; I < 16; I++)
+ VFPRegs[I] = 1;
+ AllocatedVFP = 17; // We do not have enough VFP registers.
+}
+
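A standalone replay of markAllocatedVFPs showing the AAPCS back-filling it
encodes: a float takes s0, a following double must be 2-aligned and so takes
s2-s3, and a later float still back-fills s1 (mark mirrors the function's
logic outside of clang):

#include <cstdio>

static void mark(int *VFPRegs, unsigned &Allocated,
                 unsigned Alignment, unsigned NumRequired) {
  if (Allocated >= 16)
    return;
  for (unsigned I = 0; I < 16; I += Alignment) {
    bool FoundSlot = true;
    for (unsigned J = I; J != I + NumRequired; ++J)
      if (J >= 16 || VFPRegs[J]) { FoundSlot = false; break; }
    if (FoundSlot) {
      for (unsigned J = I; J != I + NumRequired; ++J)
        VFPRegs[J] = 1;
      Allocated += NumRequired;
      return;
    }
  }
  for (unsigned I = 0; I != 16; ++I)
    VFPRegs[I] = 1;
  Allocated = 17; // out of registers; later arguments go on the stack
}

int main() {
  int VFPRegs[16] = { 0 };
  unsigned Allocated = 0;
  mark(VFPRegs, Allocated, 1, 1); // float  -> s0
  mark(VFPRegs, Allocated, 2, 2); // double -> s2-s3 (s1 skipped: alignment)
  mark(VFPRegs, Allocated, 1, 1); // float  -> s1 (back-filled)
  for (unsigned I = 0; I != 16; ++I)
    std::printf("%d", VFPRegs[I]); // prints 1111000000000000
  std::printf("\n");
  return 0;
}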
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, int *VFPRegs,
+ unsigned &AllocatedVFP,
+ bool &IsHA) const {
+ // We update number of allocated VFPs according to
+ // 6.1.2.1 The following argument types are VFP CPRCs:
+ // A single-precision floating-point type (including promoted
+ // half-precision types); A double-precision floating-point type;
+ // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
+ // with a Base Type of a single- or double-precision floating-point type,
+ // 64-bit containerized vectors or 128-bit containerized vectors with one
+ // to four Elements.
+
+ // Handle illegal vector types here.
+ if (isIllegalVectorType(Ty)) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size <= 32) {
+ llvm::Type *ResType =
+ llvm::Type::getInt32Ty(getVMContext());
+ return ABIArgInfo::getDirect(ResType);
+ }
+ if (Size == 64) {
+ llvm::Type *ResType = llvm::VectorType::get(
+ llvm::Type::getInt32Ty(getVMContext()), 2);
+ markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2);
+ return ABIArgInfo::getDirect(ResType);
+ }
+ if (Size == 128) {
+ llvm::Type *ResType = llvm::VectorType::get(
+ llvm::Type::getInt32Ty(getVMContext()), 4);
+ markAllocatedVFPs(VFPRegs, AllocatedVFP, 4, 4);
+ return ABIArgInfo::getDirect(ResType);
+ }
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ }
+ // Update VFPRegs for legal vector types.
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ uint64_t Size = getContext().getTypeSize(VT);
+ // The size of a legal vector should be a power of 2 and at least 64 bits.
+ markAllocatedVFPs(VFPRegs, AllocatedVFP, Size >= 128 ? 4 : 2, Size / 32);
+ }
+ // Update VFPRegs for floating point types.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ if (BT->getKind() == BuiltinType::Half ||
+ BT->getKind() == BuiltinType::Float)
+ markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, 1);
+ if (BT->getKind() == BuiltinType::Double ||
+ BT->getKind() == BuiltinType::LongDouble)
+ markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2);
+ }
+
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
@@ -2749,18 +3196,42 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
if (getABIKind() == ARMABIInfo::AAPCS_VFP) {
- // Homogeneous Aggregates need to be expanded.
+ // Homogeneous Aggregates need to be expanded when we can fit the aggregate
+ // into VFP registers.
const Type *Base = 0;
- if (isHomogeneousAggregate(Ty, Base, getContext())) {
+ uint64_t Members = 0;
+ if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) {
assert(Base && "Base class should be set for homogeneous aggregate");
+ // Base can be a floating-point or a vector.
+ if (Base->isVectorType()) {
+ // ElementSize is measured in single-precision floats (S registers).
+ unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 2 : 4;
+ markAllocatedVFPs(VFPRegs, AllocatedVFP, ElementSize,
+ Members * ElementSize);
+ } else if (Base->isSpecificBuiltinType(BuiltinType::Float))
+ markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, Members);
+ else {
+ assert(Base->isSpecificBuiltinType(BuiltinType::Double) ||
+ Base->isSpecificBuiltinType(BuiltinType::LongDouble));
+ markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, Members * 2);
+ }
+ IsHA = true;
return ABIArgInfo::getExpand();
}
}
// Support byval for ARM.
- if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64) ||
- getContext().getTypeAlign(Ty) > 64) {
- return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
+ // The ABI alignment for APCS is 4 bytes; for AAPCS it is at least 4 and at
+ // most 8 bytes. We realign the indirect argument if the type alignment is
+ // bigger than the ABI alignment.
+ uint64_t ABIAlign = 4;
+ uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
+ if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
+ getABIKind() == ARMABIInfo::AAPCS)
+ ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
+ if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/true,
+ /*Realign=*/TyAlign > ABIAlign);
}
// Otherwise, pass by coercing to a structure of the appropriate size.
@@ -2946,6 +3417,21 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getIndirect(0);
}
+/// isIllegalVectorType - check whether Ty is an illegal vector type.
+bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ // Check whether VT is legal.
+ unsigned NumElements = VT->getNumElements();
+ uint64_t Size = getContext().getTypeSize(VT);
+ // NumElements should be a power of 2.
+ if ((NumElements & (NumElements - 1)) != 0)
+ return true;
+ // Size should be greater than 32 bits.
+ return Size <= 32;
+ }
+ return false;
+}
+
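The two conditions, restated outside of clang and applied to a few vector
shapes (the element counts and bit sizes are illustrative):

#include <cstdio>

static bool isIllegal(unsigned NumElements, unsigned SizeBits) {
  if ((NumElements & (NumElements - 1)) != 0)
    return true;         // element count is not a power of 2
  return SizeBits <= 32; // too small for a containerized vector
}

int main() {
  std::printf("%d\n", isIllegal(3, 96));  // 1: three elements
  std::printf("%d\n", isIllegal(2, 32));  // 1: only 32 bits wide
  std::printf("%d\n", isIllegal(4, 128)); // 0: legal 128-bit vector
  return 0;
}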
llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
llvm::Type *BP = CGF.Int8PtrTy;
@@ -2954,30 +3440,104 @@ llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
- // Handle address alignment for type alignment > 32 bits
+
+ uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
+ bool IsIndirect = false;
+
+ // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
+ // APCS. For AAPCS, the ABI alignment is at least 4 and at most 8 bytes.
+ if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
+ getABIKind() == ARMABIInfo::AAPCS)
+ TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
+ else
+ TyAlign = 4;
+ // Use the indirect path if the size of the illegal vector exceeds 16 bytes.
+ if (isIllegalVectorType(Ty) && Size > 16) {
+ IsIndirect = true;
+ Size = 4;
+ TyAlign = 4;
+ }
+
+ // Handle address alignment for ABI alignment > 4 bytes.
if (TyAlign > 4) {
assert((TyAlign & (TyAlign - 1)) == 0 &&
"Alignment is not power of 2!");
llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
- Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
+ Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
}
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
uint64_t Offset =
- llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::RoundUpToAlignment(Size, 4);
llvm::Value *NextAddr =
Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
"ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+ if (IsIndirect)
+ Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
+ else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) {
+ // We can't directly cast ap.cur to a pointer to the vector type, since
+ // ap.cur may not be correctly aligned for that type. Instead, create an
+ // aligned temporary and copy the contents over from ap.cur. This is
+ // necessary whenever the natural alignment of the type is greater than
+ // the ABI alignment.
+ llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
+ CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
+ llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
+ "var.align");
+ llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
+ llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
+ Builder.CreateMemCpy(Dst, Src,
+ llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
+ TyAlign, false);
+ Addr = AlignedTemp; // The contents are now in the aligned location.
+ }
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
return AddrTyped;
}
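Two idioms in this va_arg path are worth restating in plain C++: the "ap.align" sequence rounds an address up to a power-of-two alignment, and the "var.align" memcpy avoids loading through a pointer that is under-aligned for the type. A hedged sketch (function names here are illustrative):

    #include <cstdint>
    #include <cstring>

    // Round Addr up to the next multiple of Align (a power of two), exactly
    // what the ptrtoint/add/and sequence labelled "ap.align" computes.
    static uint64_t roundUpToAlignment(uint64_t Addr, uint64_t Align) {
      return (Addr + Align - 1) & ~(Align - 1);
    }

    // When the va_arg slot may be under-aligned for T, copy its bytes into a
    // naturally aligned local instead of dereferencing a misaligned pointer;
    // this mirrors the copy into the "var.align" temporary above.
    template <typename T>
    static T loadPossiblyMisaligned(const void *Slot) {
      T Tmp;
      std::memcpy(&Tmp, Slot, sizeof(T));
      return Tmp;
    }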
+namespace {
+
+class NaClARMABIInfo : public ABIInfo {
+ public:
+ NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
+ : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {}
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+ private:
+ PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
+ ARMABIInfo NInfo; // Used for everything else.
+};
+
+class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo {
+ public:
+ NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
+ : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {}
+};
+
+}
+
+void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ if (FI.getASTCallingConvention() == CC_PnaclCall)
+ PInfo.computeInfo(FI);
+ else
+ static_cast<const ABIInfo&>(NInfo).computeInfo(FI);
+}
+
+llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // Always use the native convention; calling pnacl-style varargs functions
+ // is unsupported.
+ return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF);
+}
+
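NaClARMABIInfo is plain composition: it owns both lowerings and forwards per call site based on the AST calling convention. The same shape in miniature (these types are illustrative stand-ins, not the CodeGen classes):

    #include <string>

    struct ABI {
      virtual ~ABI() {}
      virtual std::string lower() const = 0;
    };
    struct PNaClStyle : ABI {
      std::string lower() const { return "pnaclcall"; }
    };
    struct NativeStyle : ABI {
      std::string lower() const { return "native"; }
    };

    // Owns both lowerings and picks one per call, as the wrapper above does
    // with PInfo and NInfo.
    struct Dispatching : ABI {
      PNaClStyle P;
      NativeStyle N;
      bool UsePnacl;
      explicit Dispatching(bool Pnacl) : UsePnacl(Pnacl) {}
      std::string lower() const { return UsePnacl ? P.lower() : N.lower(); }
    };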
//===----------------------------------------------------------------------===//
// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//
@@ -3072,7 +3632,7 @@ SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
// OpenCL __kernel functions get a kernel calling convention
F->setCallingConv(llvm::CallingConv::PTX_Kernel);
// And kernel functions are not subject to inlining
- F->addFnAttr(llvm::Attribute::NoInline);
+ F->addFnAttr(llvm::Attributes::NoInline);
}
}
@@ -3188,7 +3748,7 @@ void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
F->setCallingConv(CC);
// Step 2: Add attributes goodness.
- F->addFnAttr(llvm::Attribute::NoInline);
+ F->addFnAttr(llvm::Attributes::NoInline);
}
// Step 3: Emit _interrupt_handler alias.
@@ -3226,7 +3786,7 @@ void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
F->setCallingConv(llvm::CallingConv::MSP430_INTR);
// Step 2: Add attributes goodness.
- F->addFnAttr(llvm::Attribute::NoInline);
+ F->addFnAttr(llvm::Attributes::NoInline);
// Step 3: Emit ISR vector alias.
unsigned Num = attr->getNumber() + 0xffe0;
@@ -3583,7 +4143,7 @@ void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
if (M.getLangOpts().OpenCL) {
if (FD->hasAttr<OpenCLKernelAttr>()) {
// OpenCL C Kernel functions are not subject to inlining
- F->addFnAttr(llvm::Attribute::NoInline);
+ F->addFnAttr(llvm::Attributes::NoInline);
if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
@@ -3767,6 +4327,8 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
default:
return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));
+ case llvm::Triple::le32:
+ return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
case llvm::Triple::mips:
case llvm::Triple::mipsel:
return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));
@@ -3779,19 +4341,29 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::thumb:
{
ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
-
if (strcmp(getContext().getTargetInfo().getABI(), "apcs-gnu") == 0)
Kind = ARMABIInfo::APCS;
- else if (CodeGenOpts.FloatABI == "hard")
+ else if (CodeGenOpts.FloatABI == "hard" ||
+ (CodeGenOpts.FloatABI != "soft" && Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
Kind = ARMABIInfo::AAPCS_VFP;
- return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind));
+ switch (Triple.getOS()) {
+ case llvm::Triple::NativeClient:
+ return *(TheTargetCodeGenInfo =
+ new NaClARMTargetCodeGenInfo(Types, Kind));
+ default:
+ return *(TheTargetCodeGenInfo =
+ new ARMTargetCodeGenInfo(Types, Kind));
+ }
}
case llvm::Triple::ppc:
return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
case llvm::Triple::ppc64:
- return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
+ if (Triple.isOSBinFormatELF())
+ return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types));
+ else
+ return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
case llvm::Triple::nvptx:
case llvm::Triple::nvptx64:
@@ -3848,6 +4420,8 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::MinGW32:
case llvm::Triple::Cygwin:
return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
+ case llvm::Triple::NativeClient:
+ return *(TheTargetCodeGenInfo = new NaClX86_64TargetCodeGenInfo(Types, HasAVX));
default:
return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
HasAVX));
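The ARM branch above folds the float-ABI choice into the kind selection; restated as a compact pure-C++ predicate (names are illustrative, not the llvm::Triple API):

    #include <string>

    enum ArmKind { APCS, AAPCS, AAPCS_VFP };

    // apcs-gnu forces APCS; "hard" float, or any non-"soft" setting on a
    // *-gnueabihf triple, selects AAPCS_VFP; everything else gets AAPCS.
    static ArmKind pickArmKind(const std::string &ABIName,
                               const std::string &FloatABI, bool GNUEABIHF) {
      if (ABIName == "apcs-gnu")
        return APCS;
      if (FloatABI == "hard" || (FloatABI != "soft" && GNUEABIHF))
        return AAPCS_VFP;
      return AAPCS;
    }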