Diffstat (limited to 'contrib/llvm/tools/clang/lib/CodeGen')
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h           |   12
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp     |  339
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp        |   74
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h          |    7
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h         |    8
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp       |  924
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp           |   77
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h          |    1
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp          |  482
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCall.h            |    8
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp         |  156
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp     |   49
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp          |  186
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp       |  195
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp     | 1656
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGException.h       |  428
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp          |  449
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp       |  118
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp       |  111
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp   |  101
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp  |  102
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp    |  505
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp          |   29
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp       |  377
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp       |  833
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h     |   10
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp          |   22
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp          |  275
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp   |  144
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp       |  427
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h         |   17
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt      |    6
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp   |  348
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp |  801
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h   |  703
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp   |  370
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h     |   60
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp    |   82
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h      |   34
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h        |   10
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/Makefile            |    9
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp          |  442
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/Mangle.h            |   12
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp | 1191
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp   |    2
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp      |  392
46 files changed, 9084 insertions(+), 3500 deletions(-)
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
index 1ab2f55..85524ac 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
@@ -11,11 +11,9 @@
#define CLANG_CODEGEN_ABIINFO_H
#include "clang/AST/Type.h"
-
-#include <cassert>
+#include "llvm/Type.h"
namespace llvm {
- class Type;
class Value;
class LLVMContext;
}
@@ -70,7 +68,7 @@ namespace clang {
private:
Kind TheKind;
- const llvm::Type *TypeData;
+ llvm::PATypeHolder TypeData;
unsigned UIntData;
bool BoolData;
@@ -136,7 +134,11 @@ namespace clang {
virtual void computeInfo(CodeGen::CGFunctionInfo &FI,
ASTContext &Ctx,
- llvm::LLVMContext &VMContext) const = 0;
+ llvm::LLVMContext &VMContext,
+ // This is the preferred type for argument lowering
+ // which can be used to generate better IR.
+ const llvm::Type *const *PrefTypes = 0,
+ unsigned NumPrefTypes = 0) const = 0;
/// EmitVAArg - Emit the target dependent code to load a value of
/// \arg Ty from the va_list pointed to by \arg VAListAddr.
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
new file mode 100644
index 0000000..69efe43
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
@@ -0,0 +1,339 @@
+//===--- BackendUtil.cpp - LLVM Backend Utilities -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/BackendUtil.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/TargetOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/Assembly/PrintModulePass.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/StandardPasses.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/SubtargetFeature.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetRegistry.h"
+using namespace clang;
+using namespace llvm;
+
+namespace {
+
+class EmitAssemblyHelper {
+ Diagnostic &Diags;
+ const CodeGenOptions &CodeGenOpts;
+ const TargetOptions &TargetOpts;
+ Module *TheModule;
+
+ Timer CodeGenerationTime;
+
+ mutable FunctionPassManager *CodeGenPasses;
+ mutable PassManager *PerModulePasses;
+ mutable FunctionPassManager *PerFunctionPasses;
+
+private:
+ FunctionPassManager *getCodeGenPasses() const {
+ if (!CodeGenPasses) {
+ CodeGenPasses = new FunctionPassManager(TheModule);
+ CodeGenPasses->add(new TargetData(TheModule));
+ }
+ return CodeGenPasses;
+ }
+
+ PassManager *getPerModulePasses() const {
+ if (!PerModulePasses) {
+ PerModulePasses = new PassManager();
+ PerModulePasses->add(new TargetData(TheModule));
+ }
+ return PerModulePasses;
+ }
+
+ FunctionPassManager *getPerFunctionPasses() const {
+ if (!PerFunctionPasses) {
+ PerFunctionPasses = new FunctionPassManager(TheModule);
+ PerFunctionPasses->add(new TargetData(TheModule));
+ }
+ return PerFunctionPasses;
+ }
+
+ void CreatePasses();
+
+ /// AddEmitPasses - Add passes necessary to emit assembly or LLVM IR.
+ ///
+ /// \return True on success.
+ bool AddEmitPasses(BackendAction Action, formatted_raw_ostream &OS);
+
+public:
+ EmitAssemblyHelper(Diagnostic &_Diags,
+ const CodeGenOptions &CGOpts, const TargetOptions &TOpts,
+ Module *M)
+ : Diags(_Diags), CodeGenOpts(CGOpts), TargetOpts(TOpts),
+ TheModule(M), CodeGenerationTime("Code Generation Time"),
+ CodeGenPasses(0), PerModulePasses(0), PerFunctionPasses(0) {}
+
+ ~EmitAssemblyHelper() {
+ delete CodeGenPasses;
+ delete PerModulePasses;
+ delete PerFunctionPasses;
+ }
+
+ void EmitAssembly(BackendAction Action, raw_ostream *OS);
+};
+
+}
+
+void EmitAssemblyHelper::CreatePasses() {
+ unsigned OptLevel = CodeGenOpts.OptimizationLevel;
+ CodeGenOptions::InliningMethod Inlining = CodeGenOpts.Inlining;
+
+ // Handle disabling of LLVM optimization, where we want to preserve the
+ // internal module before any optimization.
+ if (CodeGenOpts.DisableLLVMOpts) {
+ OptLevel = 0;
+ Inlining = CodeGenOpts.NoInlining;
+ }
+
+ // In -O0 if checking is disabled, we don't even have per-function passes.
+ if (CodeGenOpts.VerifyModule)
+ getPerFunctionPasses()->add(createVerifierPass());
+
+ // Assume that standard function passes aren't run for -O0.
+ if (OptLevel > 0)
+ llvm::createStandardFunctionPasses(getPerFunctionPasses(), OptLevel);
+
+ llvm::Pass *InliningPass = 0;
+ switch (Inlining) {
+ case CodeGenOptions::NoInlining: break;
+ case CodeGenOptions::NormalInlining: {
+ // Set the inline threshold following llvm-gcc.
+ //
+ // FIXME: Derive these constants in a principled fashion.
+ unsigned Threshold = 225;
+ if (CodeGenOpts.OptimizeSize)
+ Threshold = 75;
+ else if (OptLevel > 2)
+ Threshold = 275;
+ InliningPass = createFunctionInliningPass(Threshold);
+ break;
+ }
+ case CodeGenOptions::OnlyAlwaysInlining:
+ InliningPass = createAlwaysInlinerPass(); // Respect always_inline
+ break;
+ }
+
+ // For now we always create per module passes.
+ llvm::createStandardModulePasses(getPerModulePasses(), OptLevel,
+ CodeGenOpts.OptimizeSize,
+ CodeGenOpts.UnitAtATime,
+ CodeGenOpts.UnrollLoops,
+ CodeGenOpts.SimplifyLibCalls,
+ /*HaveExceptions=*/true,
+ InliningPass);
+}
+
+bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
+ formatted_raw_ostream &OS) {
+ // Create the TargetMachine for generating code.
+ std::string Error;
+ std::string Triple = TheModule->getTargetTriple();
+ const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error);
+ if (!TheTarget) {
+ Diags.Report(diag::err_fe_unable_to_create_target) << Error;
+ return false;
+ }
+
+ // FIXME: Expose these capabilities via actual APIs!!!! Aside from just
+ // being gross, this is also totally broken if we ever care about
+ // concurrency.
+
+ // Set frame pointer elimination mode.
+ if (!CodeGenOpts.DisableFPElim) {
+ llvm::NoFramePointerElim = false;
+ llvm::NoFramePointerElimNonLeaf = false;
+ } else if (CodeGenOpts.OmitLeafFramePointer) {
+ llvm::NoFramePointerElim = false;
+ llvm::NoFramePointerElimNonLeaf = true;
+ } else {
+ llvm::NoFramePointerElim = true;
+ llvm::NoFramePointerElimNonLeaf = true;
+ }
+
+ // Set float ABI type.
+ if (CodeGenOpts.FloatABI == "soft")
+ llvm::FloatABIType = llvm::FloatABI::Soft;
+ else if (CodeGenOpts.FloatABI == "hard")
+ llvm::FloatABIType = llvm::FloatABI::Hard;
+ else {
+ assert(CodeGenOpts.FloatABI.empty() && "Invalid float abi!");
+ llvm::FloatABIType = llvm::FloatABI::Default;
+ }
+
+ NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
+ llvm::UseSoftFloat = CodeGenOpts.SoftFloat;
+ UnwindTablesMandatory = CodeGenOpts.UnwindTables;
+
+ TargetMachine::setAsmVerbosityDefault(CodeGenOpts.AsmVerbose);
+
+ TargetMachine::setFunctionSections(CodeGenOpts.FunctionSections);
+ TargetMachine::setDataSections (CodeGenOpts.DataSections);
+
+ // FIXME: Parse this earlier.
+ if (CodeGenOpts.RelocationModel == "static") {
+ TargetMachine::setRelocationModel(llvm::Reloc::Static);
+ } else if (CodeGenOpts.RelocationModel == "pic") {
+ TargetMachine::setRelocationModel(llvm::Reloc::PIC_);
+ } else {
+ assert(CodeGenOpts.RelocationModel == "dynamic-no-pic" &&
+ "Invalid PIC model!");
+ TargetMachine::setRelocationModel(llvm::Reloc::DynamicNoPIC);
+ }
+ // FIXME: Parse this earlier.
+ if (CodeGenOpts.CodeModel == "small") {
+ TargetMachine::setCodeModel(llvm::CodeModel::Small);
+ } else if (CodeGenOpts.CodeModel == "kernel") {
+ TargetMachine::setCodeModel(llvm::CodeModel::Kernel);
+ } else if (CodeGenOpts.CodeModel == "medium") {
+ TargetMachine::setCodeModel(llvm::CodeModel::Medium);
+ } else if (CodeGenOpts.CodeModel == "large") {
+ TargetMachine::setCodeModel(llvm::CodeModel::Large);
+ } else {
+ assert(CodeGenOpts.CodeModel.empty() && "Invalid code model!");
+ TargetMachine::setCodeModel(llvm::CodeModel::Default);
+ }
+
+ std::vector<const char *> BackendArgs;
+ BackendArgs.push_back("clang"); // Fake program name.
+ if (!CodeGenOpts.DebugPass.empty()) {
+ BackendArgs.push_back("-debug-pass");
+ BackendArgs.push_back(CodeGenOpts.DebugPass.c_str());
+ }
+ if (!CodeGenOpts.LimitFloatPrecision.empty()) {
+ BackendArgs.push_back("-limit-float-precision");
+ BackendArgs.push_back(CodeGenOpts.LimitFloatPrecision.c_str());
+ }
+ if (llvm::TimePassesIsEnabled)
+ BackendArgs.push_back("-time-passes");
+ BackendArgs.push_back(0);
+ llvm::cl::ParseCommandLineOptions(BackendArgs.size() - 1,
+ const_cast<char **>(&BackendArgs[0]));
+
+ std::string FeaturesStr;
+ if (TargetOpts.CPU.size() || TargetOpts.Features.size()) {
+ SubtargetFeatures Features;
+ Features.setCPU(TargetOpts.CPU);
+ for (std::vector<std::string>::const_iterator
+ it = TargetOpts.Features.begin(),
+ ie = TargetOpts.Features.end(); it != ie; ++it)
+ Features.AddFeature(*it);
+ FeaturesStr = Features.getString();
+ }
+ TargetMachine *TM = TheTarget->createTargetMachine(Triple, FeaturesStr);
+
+ if (CodeGenOpts.RelaxAll)
+ TM->setMCRelaxAll(true);
+
+ // Create the code generator passes.
+ FunctionPassManager *PM = getCodeGenPasses();
+ CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
+
+ switch (CodeGenOpts.OptimizationLevel) {
+ default: break;
+ case 0: OptLevel = CodeGenOpt::None; break;
+ case 3: OptLevel = CodeGenOpt::Aggressive; break;
+ }
+
+ // Normal mode, emit a .s or .o file by running the code generator. Note,
+ // this also adds codegenerator level optimization passes.
+ TargetMachine::CodeGenFileType CGFT = TargetMachine::CGFT_AssemblyFile;
+ if (Action == Backend_EmitObj)
+ CGFT = TargetMachine::CGFT_ObjectFile;
+ else if (Action == Backend_EmitMCNull)
+ CGFT = TargetMachine::CGFT_Null;
+ else
+ assert(Action == Backend_EmitAssembly && "Invalid action!");
+ if (TM->addPassesToEmitFile(*PM, OS, CGFT, OptLevel,
+ /*DisableVerify=*/!CodeGenOpts.VerifyModule)) {
+ Diags.Report(diag::err_fe_unable_to_interface_with_target);
+ return false;
+ }
+
+ return true;
+}
+
+void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_ostream *OS) {
+ TimeRegion Region(llvm::TimePassesIsEnabled ? &CodeGenerationTime : 0);
+ llvm::formatted_raw_ostream FormattedOS;
+
+ CreatePasses();
+ switch (Action) {
+ case Backend_EmitNothing:
+ break;
+
+ case Backend_EmitBC:
+ getPerModulePasses()->add(createBitcodeWriterPass(*OS));
+ break;
+
+ case Backend_EmitLL:
+ FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM);
+ getPerModulePasses()->add(createPrintModulePass(&FormattedOS));
+ break;
+
+ default:
+ FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM);
+ if (!AddEmitPasses(Action, FormattedOS))
+ return;
+ }
+
+ // Run passes. For now we do all passes at once, but eventually we
+ // would like to have the option of streaming code generation.
+
+ if (PerFunctionPasses) {
+ PrettyStackTraceString CrashInfo("Per-function optimization");
+
+ PerFunctionPasses->doInitialization();
+ for (Module::iterator I = TheModule->begin(),
+ E = TheModule->end(); I != E; ++I)
+ if (!I->isDeclaration())
+ PerFunctionPasses->run(*I);
+ PerFunctionPasses->doFinalization();
+ }
+
+ if (PerModulePasses) {
+ PrettyStackTraceString CrashInfo("Per-module optimization passes");
+ PerModulePasses->run(*TheModule);
+ }
+
+ if (CodeGenPasses) {
+ PrettyStackTraceString CrashInfo("Code generation");
+
+ CodeGenPasses->doInitialization();
+ for (Module::iterator I = TheModule->begin(),
+ E = TheModule->end(); I != E; ++I)
+ if (!I->isDeclaration())
+ CodeGenPasses->run(*I);
+ CodeGenPasses->doFinalization();
+ }
+}
+
+void clang::EmitBackendOutput(Diagnostic &Diags, const CodeGenOptions &CGOpts,
+ const TargetOptions &TOpts, Module *M,
+ BackendAction Action, raw_ostream *OS) {
+ EmitAssemblyHelper AsmHelper(Diags, CGOpts, TOpts, M);
+
+ AsmHelper.EmitAssembly(Action, OS);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
index de58597..cb9e636 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
@@ -228,7 +228,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
// block literal.
// __invoke
llvm::Function *Fn
- = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, CurFuncDecl,
+ = CodeGenFunction(CGM).GenerateBlockFunction(CurGD, BE, Info, CurFuncDecl,
LocalDeclMap);
BlockHasCopyDispose |= Info.BlockHasCopyDispose;
Elts[3] = Fn;
@@ -253,7 +253,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, Args,
FunctionType::ExtInfo());
- if (CGM.ReturnTypeUsesSret(FnInfo))
+ if (CGM.ReturnTypeUsesSRet(FnInfo))
flags |= BLOCK_USE_STRET;
}
const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
@@ -296,8 +296,11 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
QualType Ty = E->getType();
if (BDRE && BDRE->isByRef()) {
- Types[i+BlockFields] = llvm::PointerType::get(BuildByRefType(BDRE->getDecl()), 0);
- } else
+ Types[i+BlockFields] =
+ llvm::PointerType::get(BuildByRefType(BDRE->getDecl()), 0);
+ } else if (BDRE && BDRE->getDecl()->getType()->isReferenceType()) {
+ Types[i+BlockFields] = llvm::PointerType::get(ConvertType(Ty), 0);
+ } else
Types[i+BlockFields] = ConvertType(Ty);
}
@@ -358,11 +361,23 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
Builder.CreateStore(Loc, Addr);
continue;
} else {
- E = new (getContext()) DeclRefExpr(const_cast<ValueDecl*>(VD),
- VD->getType(),
- SourceLocation());
+ if (BDRE->getCopyConstructorExpr()) {
+ E = BDRE->getCopyConstructorExpr();
+ PushDestructorCleanup(E->getType(), Addr);
+ }
+ else {
+ E = new (getContext()) DeclRefExpr(const_cast<ValueDecl*>(VD),
+ VD->getType().getNonReferenceType(),
+ SourceLocation());
+ if (VD->getType()->isReferenceType()) {
+ E = new (getContext())
+ UnaryOperator(const_cast<Expr*>(E), UnaryOperator::AddrOf,
+ getContext().getPointerType(E->getType()),
+ SourceLocation());
+ }
+ }
+ }
}
- }
if (BDRE->isByRef()) {
E = new (getContext())
@@ -386,8 +401,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
llvm::Value *BlockLiteral = LoadBlockStruct();
Loc = Builder.CreateGEP(BlockLiteral,
- llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
- offset.getQuantity()),
+ llvm::ConstantInt::get(Int64Ty, offset.getQuantity()),
"block.literal");
Ty = llvm::PointerType::get(Ty, 0);
Loc = Builder.CreateBitCast(Loc, Ty);
@@ -599,13 +613,13 @@ void CodeGenFunction::AllocateBlockDecl(const BlockDeclRefExpr *E) {
llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const ValueDecl *VD,
bool IsByRef) {
+
CharUnits offset = BlockDecls[VD];
assert(!offset.isZero() && "getting address of unallocated decl");
llvm::Value *BlockLiteral = LoadBlockStruct();
llvm::Value *V = Builder.CreateGEP(BlockLiteral,
- llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
- offset.getQuantity()),
+ llvm::ConstantInt::get(Int64Ty, offset.getQuantity()),
"block.literal");
if (IsByRef) {
const llvm::Type *PtrStructTy
@@ -626,9 +640,10 @@ llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const ValueDecl *VD,
V = Builder.CreateLoad(V);
} else {
const llvm::Type *Ty = CGM.getTypes().ConvertType(VD->getType());
-
Ty = llvm::PointerType::get(Ty, 0);
V = Builder.CreateBitCast(V, Ty);
+ if (VD->getType()->isReferenceType())
+ V = Builder.CreateLoad(V, "ref.tmp");
}
return V;
}
@@ -680,7 +695,7 @@ BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
CGBlockInfo Info(n);
llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
llvm::Function *Fn
- = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, 0, LocalDeclMap);
+ = CodeGenFunction(CGM).GenerateBlockFunction(GlobalDecl(), BE, Info, 0, LocalDeclMap);
assert(Info.BlockSize == BlockLiteralSize
&& "no imports allowed for global block");
@@ -719,7 +734,7 @@ llvm::Value *CodeGenFunction::LoadBlockStruct() {
}
llvm::Function *
-CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
+CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, const BlockExpr *BExpr,
CGBlockInfo &Info,
const Decl *OuterFuncDecl,
llvm::DenseMap<const Decl*, llvm::Value*> ldm) {
@@ -792,18 +807,29 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic);
MangleBuffer Name;
- CGM.getMangledName(Name, BD);
+ CGM.getMangledName(GD, Name, BD);
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
Name.getString(), &CGM.getModule());
CGM.SetInternalFunctionAttributes(BD, Fn, FI);
+ QualType FnType(BlockFunctionType, 0);
+ bool HasPrototype = isa<FunctionProtoType>(BlockFunctionType);
+
+ IdentifierInfo *ID = &getContext().Idents.get(Name.getString());
+ CurCodeDecl = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), ID, FnType,
+ 0,
+ FunctionDecl::Static,
+ FunctionDecl::None,
+ false, HasPrototype);
+
StartFunction(BD, ResultType, Fn, Args,
BExpr->getBody()->getLocEnd());
CurFuncDecl = OuterFuncDecl;
- CurCodeDecl = BD;
// If we have a C++ 'this' reference, go ahead and force it into
// existence now.
@@ -985,8 +1011,7 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index);
Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty);
- llvm::Value *N = llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(T->getContext()), flag);
+ llvm::Value *N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
llvm::Value *F = getBlockObjectAssign();
Builder.CreateCall3(F, Dstv, Srcv, N);
}
@@ -1138,8 +1163,7 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
flag |= BLOCK_BYREF_CALLER;
- llvm::Value *N = llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(T->getContext()), flag);
+ llvm::Value *N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
llvm::Value *F = getBlockObjectAssign();
Builder.CreateCall3(F, DstObj, SrcObj, N);
@@ -1241,7 +1265,7 @@ llvm::Value *BlockFunction::getBlockObjectDispose() {
std::vector<const llvm::Type*> ArgTys;
const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
ArgTys.push_back(PtrToInt8Ty);
- ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
+ ArgTys.push_back(CGF.Int32Ty);
FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
CGM.BlockObjectDispose
= CGM.CreateRuntimeFunction(FTy, "_Block_object_dispose");
@@ -1256,7 +1280,7 @@ llvm::Value *BlockFunction::getBlockObjectAssign() {
const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
ArgTys.push_back(PtrToInt8Ty);
ArgTys.push_back(PtrToInt8Ty);
- ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
+ ArgTys.push_back(CGF.Int32Ty);
FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
CGM.BlockObjectAssign
= CGM.CreateRuntimeFunction(FTy, "_Block_object_assign");
@@ -1268,7 +1292,7 @@ void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) {
llvm::Value *F = getBlockObjectDispose();
llvm::Value *N;
V = Builder.CreateBitCast(V, PtrToInt8Ty);
- N = llvm::ConstantInt::get(llvm::Type::getInt32Ty(V->getContext()), flag);
+ N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
Builder.CreateCall2(F, V, N);
}
@@ -1276,7 +1300,7 @@ ASTContext &BlockFunction::getContext() const { return CGM.getContext(); }
BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf,
CGBuilderTy &B)
- : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()), Builder(B) {
+ : CGM(cgm), VMContext(cgm.getLLVMContext()), CGF(cgf), Builder(B) {
PtrToInt8Ty = llvm::PointerType::getUnqual(
llvm::Type::getInt8Ty(VMContext));
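A recurring cleanup in the hunks above replaces per-use `llvm::Type::getInt32Ty(VMContext)` lookups with the `CGF.Int32Ty` member that CodeGenFunction now caches (the `Int64Ty` hunks follow the same pattern). The shape of that pattern, shown with a hypothetical standalone holder rather than clang's actual class, using the pre-3.0 header locations:

    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"

    // Hypothetical holder illustrating the cached-type pattern.
    struct IRTypes {
      const llvm::IntegerType *Int32Ty;
      explicit IRTypes(llvm::LLVMContext &C)
          : Int32Ty(llvm::Type::getInt32Ty(C)) {}
      // Call sites say getInt32(flag) instead of re-deriving the type from
      // an LLVMContext they may not have handy.
      llvm::Constant *getInt32(int V) const {
        return llvm::ConstantInt::get(Int32Ty, V);
      }
    };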
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
index e9b2bd5..772a62c 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
@@ -99,7 +99,7 @@ public:
llvm::Value *BlockObjectAssign;
llvm::Value *BlockObjectDispose;
- const llvm::Type *PtrToInt8Ty;
+ const llvm::PointerType *PtrToInt8Ty;
std::map<uint64_t, llvm::Constant *> AssignCache;
std::map<uint64_t, llvm::Constant *> DestroyCache;
@@ -121,13 +121,14 @@ public:
class BlockFunction : public BlockBase {
CodeGenModule &CGM;
- CodeGenFunction &CGF;
ASTContext &getContext() const;
protected:
llvm::LLVMContext &VMContext;
public:
+ CodeGenFunction &CGF;
+
const llvm::PointerType *PtrToInt8Ty;
struct HelperInfo {
int index;
@@ -180,7 +181,7 @@ public:
/// BlockDecls - Offsets for all Decls in BlockDeclRefExprs.
llvm::DenseMap<const Decl*, CharUnits> BlockDecls;
-
+
/// BlockCXXThisOffset - The offset of the C++ 'this' value within
/// the block structure.
CharUnits BlockCXXThisOffset;
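Moving CGF below VMContext here is also why the BlockFunction constructor in the CGBlocks.cpp hunk reorders its initializer list: C++ initializes members in declaration order, not initializer-list order, and a mismatched list earns a -Wreorder warning. A self-contained illustration:

    struct Example {
      int First;
      int Second;
      // Members are initialized in declaration order (First, then Second),
      // regardless of how the initializer list is written; listing Second
      // first would draw -Wreorder, and an initializer that read another
      // member could observe it uninitialized if the declared order differs.
      Example() : First(1), Second(2) {}
    };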
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h b/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h
index ed56bd9..8120217 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h
@@ -14,12 +14,14 @@
namespace clang {
namespace CodeGen {
- // Don't preserve names on values in an optimized build.
+
+// Don't preserve names on values in an optimized build.
#ifdef NDEBUG
- typedef llvm::IRBuilder<false> CGBuilderTy;
+typedef llvm::IRBuilder<false> CGBuilderTy;
#else
- typedef llvm::IRBuilder<> CGBuilderTy;
+typedef llvm::IRBuilder<> CGBuilderTy;
#endif
+
} // end namespace CodeGen
} // end namespace clang
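The typedef pair relies on IRBuilder's leading preserveNames template parameter: `IRBuilder<false>` silently drops the name hints passed to Create* calls, skipping string allocation in release (NDEBUG) builds of the compiler. A sketch under that assumption, using the pre-3.0 header location:

    #include "llvm/Support/IRBuilder.h"

    void Sketch(llvm::BasicBlock *BB, llvm::Value *L, llvm::Value *R) {
      llvm::IRBuilder<false> B(BB);       // preserveNames = false
      // With IRBuilder<> the result would be named %sum in the printed IR;
      // here the "sum" hint is discarded and the value stays anonymous.
      B.CreateAdd(L, R, "sum");
    }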
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
index dd505c2..fff4bac 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
@@ -14,6 +14,7 @@
#include "TargetInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "CGObjCRuntime.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
@@ -84,11 +85,6 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
return RValue::get(CGF.Builder.CreateBinOp(Op, Result, Args[1]));
}
-static llvm::ConstantInt *getInt32(llvm::LLVMContext &Context, int32_t Value) {
- return llvm::ConstantInt::get(llvm::Type::getInt32Ty(Context), Value);
-}
-
-
/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
/// which must be a scalar floating point type.
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
@@ -283,9 +279,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
// FIXME: Technically these constants should of type 'int', yes?
RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+ llvm::ConstantInt::get(Int32Ty, 0);
Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 3);
+ llvm::ConstantInt::get(Int32Ty, 3);
Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0);
return RValue::get(Builder.CreateCall3(F, Address, RW, Locality));
}
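Per the defaults filled in above (rw = 0, locality = 3), the two calls below lower to the same llvm.prefetch intrinsic call:

    char Buf[4096];

    void Warm(void) {
      __builtin_prefetch(Buf);        // defaults: read, maximal locality
      __builtin_prefetch(Buf, 0, 3);  // the same, spelled out
    }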
@@ -395,12 +391,68 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
V = Builder.CreateAnd(Eq, IsNotInf, "and");
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
+
+ case Builtin::BI__builtin_fpclassify: {
+ Value *V = EmitScalarExpr(E->getArg(5));
+ const llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
+
+ // Create Result
+ BasicBlock *Begin = Builder.GetInsertBlock();
+ BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
+ Builder.SetInsertPoint(End);
+ PHINode *Result =
+ Builder.CreatePHI(ConvertType(E->getArg(0)->getType()),
+ "fpclassify_result");
+
+ // if (V==0) return FP_ZERO
+ Builder.SetInsertPoint(Begin);
+ Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
+ "iszero");
+ Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
+ BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
+ Builder.CreateCondBr(IsZero, End, NotZero);
+ Result->addIncoming(ZeroLiteral, Begin);
+
+ // if (V != V) return FP_NAN
+ Builder.SetInsertPoint(NotZero);
+ Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
+ Value *NanLiteral = EmitScalarExpr(E->getArg(0));
+ BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
+ Builder.CreateCondBr(IsNan, End, NotNan);
+ Result->addIncoming(NanLiteral, NotZero);
+
+ // if (fabs(V) == infinity) return FP_INFINITY
+ Builder.SetInsertPoint(NotNan);
+ Value *VAbs = EmitFAbs(*this, V, E->getArg(5)->getType());
+ Value *IsInf =
+ Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
+ "isinf");
+ Value *InfLiteral = EmitScalarExpr(E->getArg(1));
+ BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
+ Builder.CreateCondBr(IsInf, End, NotInf);
+ Result->addIncoming(InfLiteral, NotNan);
+
+ // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
+ Builder.SetInsertPoint(NotInf);
+ APFloat Smallest = APFloat::getSmallestNormalized(
+ getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
+ Value *IsNormal =
+ Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
+ "isnormal");
+ Value *NormalResult =
+ Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
+ EmitScalarExpr(E->getArg(3)));
+ Builder.CreateBr(End);
+ Result->addIncoming(NormalResult, NotInf);
+
+ // return Result
+ Builder.SetInsertPoint(End);
+ return RValue::get(Result);
+ }
case Builtin::BIalloca:
case Builtin::BI__builtin_alloca: {
- // FIXME: LLVM IR Should allow alloca with an i64 size!
Value *Size = EmitScalarExpr(E->getArg(0));
- Size = Builder.CreateIntCast(Size, llvm::Type::getInt32Ty(VMContext), false, "tmp");
return RValue::get(Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), Size, "tmp"));
}
case Builtin::BIbzero:
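The __builtin_fpclassify lowering in the hunk above builds a chain of blocks feeding a single phi; its behavior matches this source-level logic, where the first five parameters mirror E->getArg(0..4) and the operand is argument 5 (shown for double, whose smallest normalized value is DBL_MIN):

    #include <float.h>

    int fpclassify_equiv(int nan, int inf, int normal, int subnormal,
                         int zero, double v) {
      if (v == 0.0) return zero;              // fcmp oeq against +0.0
      if (v != v)   return nan;               // fcmp uno: true only for NaN
      double a = __builtin_fabs(v);
      if (a == __builtin_inf()) return inf;   // fcmp oeq against +infinity
      // fcmp uge against the smallest normalized value of the type
      return a >= DBL_MIN ? normal : subnormal;
    }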
@@ -411,7 +463,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Address,
llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0),
SizeVal,
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1),
+ llvm::ConstantInt::get(Int32Ty, 1),
llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
return RValue::get(Address);
}
@@ -423,10 +475,20 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Builder.CreateCall5(CGM.getMemCpyFn(Address->getType(), SrcAddr->getType(),
SizeVal->getType()),
Address, SrcAddr, SizeVal,
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1),
+ llvm::ConstantInt::get(Int32Ty, 1),
llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
return RValue::get(Address);
}
+
+ case Builtin::BI__builtin_objc_memmove_collectable: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *SrcAddr = EmitScalarExpr(E->getArg(1));
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
+ Address, SrcAddr, SizeVal);
+ return RValue::get(Address);
+ }
+
case Builtin::BImemmove:
case Builtin::BI__builtin_memmove: {
Value *Address = EmitScalarExpr(E->getArg(0));
@@ -435,7 +497,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Builder.CreateCall5(CGM.getMemMoveFn(Address->getType(), SrcAddr->getType(),
SizeVal->getType()),
Address, SrcAddr, SizeVal,
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1),
+ llvm::ConstantInt::get(Int32Ty, 1),
llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
return RValue::get(Address);
}
@@ -448,7 +510,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
llvm::Type::getInt8Ty(VMContext)),
SizeVal,
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1),
+ llvm::ConstantInt::get(Int32Ty, 1),
llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
return RValue::get(Address);
}
@@ -464,21 +526,18 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
int32_t Offset = 0;
Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa, 0, 0);
- return RValue::get(Builder.CreateCall(F, getInt32(VMContext, Offset)));
+ return RValue::get(Builder.CreateCall(F,
+ llvm::ConstantInt::get(Int32Ty, Offset)));
}
case Builtin::BI__builtin_return_address: {
Value *Depth = EmitScalarExpr(E->getArg(0));
- Depth = Builder.CreateIntCast(Depth,
- llvm::Type::getInt32Ty(VMContext),
- false, "tmp");
+ Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
Value *F = CGM.getIntrinsic(Intrinsic::returnaddress, 0, 0);
return RValue::get(Builder.CreateCall(F, Depth));
}
case Builtin::BI__builtin_frame_address: {
Value *Depth = EmitScalarExpr(E->getArg(0));
- Depth = Builder.CreateIntCast(Depth,
- llvm::Type::getInt32Ty(VMContext),
- false, "tmp");
+ Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
Value *F = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
return RValue::get(Builder.CreateCall(F, Depth));
}
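Both builtins above accept a depth argument that the code normalizes to i32 before calling the intrinsic; the common depth-0 uses look like:

    // Return address of the current call frame, and the current frame pointer.
    void *OwnReturnAddress(void)    { return __builtin_return_address(0); }
    void *CurrentFramePointer(void) { return __builtin_frame_address(0); }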
@@ -551,36 +610,45 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Result);
// Otherwise, ask the codegen data what to do.
- const llvm::IntegerType *Int64Ty = llvm::IntegerType::get(C, 64);
if (getTargetHooks().extendPointerWithSExt())
return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
else
return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
}
-#if 0
- // FIXME: Finish/enable when LLVM backend support stabilizes
case Builtin::BI__builtin_setjmp: {
+ // Buffer is a void**.
Value *Buf = EmitScalarExpr(E->getArg(0));
- // Store the frame pointer to the buffer
- Value *FrameAddrF = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
+
+ // Store the frame pointer to the setjmp buffer.
Value *FrameAddr =
- Builder.CreateCall(FrameAddrF,
- Constant::getNullValue(llvm::Type::getInt32Ty(VMContext)));
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
+ ConstantInt::get(Int32Ty, 0));
Builder.CreateStore(FrameAddr, Buf);
- // Call the setjmp intrinsic
- Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp, 0, 0);
- const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
- Buf = Builder.CreateBitCast(Buf, DestType);
+
+ // Store the stack pointer to the setjmp buffer.
+ Value *StackAddr =
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
+ Value *StackSaveSlot =
+ Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
+ Builder.CreateStore(StackAddr, StackSaveSlot);
+
+ // Call LLVM's EH setjmp, which is lightweight.
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
+ Buf = Builder.CreateBitCast(Buf, llvm::Type::getInt8PtrTy(VMContext));
return RValue::get(Builder.CreateCall(F, Buf));
}
case Builtin::BI__builtin_longjmp: {
- Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp, 0, 0);
Value *Buf = EmitScalarExpr(E->getArg(0));
- const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
- Buf = Builder.CreateBitCast(Buf, DestType);
- return RValue::get(Builder.CreateCall(F, Buf));
+ Buf = Builder.CreateBitCast(Buf, llvm::Type::getInt8PtrTy(VMContext));
+
+ // Call LLVM's EH longjmp, which is lightweight.
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
+
+ // longjmp doesn't return; mark this as unreachable
+ Value *V = Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+ return RValue::get(V);
}
-#endif
case Builtin::BI__sync_fetch_and_add:
case Builtin::BI__sync_fetch_and_sub:
case Builtin::BI__sync_fetch_and_or:
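The re-enabled setjmp/longjmp lowering above writes into the caller's buffer before invoking the lightweight EH intrinsics; the slot layout it assumes (void*-sized words, per LLVM's SJLJ convention) is sketched here:

    // Five-word buffer, per LLVM's SJLJ convention:
    //   Buf[0] = frame pointer   (stored above via llvm.frameaddress)
    //   Buf[1] = resume address  (filled in by llvm.eh.sjlj.setjmp itself)
    //   Buf[2] = stack pointer   (stored above via llvm.stacksave)
    //   Buf[3..4] = target-specific scratch
    static void *Buf[5];

    int Example(void) {
      if (__builtin_setjmp(Buf) == 0)
        return 0;                 // direct path
      return 1;                   // reached via __builtin_longjmp(Buf, 1)
    }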
@@ -870,14 +938,703 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
}
}
+const llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type, bool q) {
+ switch (type) {
+ default: break;
+ case 0:
+ case 5: return llvm::VectorType::get(llvm::Type::getInt8Ty(C), 8 << (int)q);
+ case 6:
+ case 7:
+ case 1: return llvm::VectorType::get(llvm::Type::getInt16Ty(C),4 << (int)q);
+ case 2: return llvm::VectorType::get(llvm::Type::getInt32Ty(C),2 << (int)q);
+ case 3: return llvm::VectorType::get(llvm::Type::getInt64Ty(C),1 << (int)q);
+ case 4: return llvm::VectorType::get(llvm::Type::getFloatTy(C),2 << (int)q);
+ };
+ return 0;
+}
+
+Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
+ unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
+ SmallVector<Constant*, 16> Indices(nElts, C);
+ Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ return Builder.CreateShuffleVector(V, V, SV, "lane");
+}
+
+Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
+ const char *name, bool splat,
+ unsigned shift, bool rightshift) {
+ unsigned j = 0;
+ for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
+ ai != ae; ++ai, ++j)
+ if (shift > 0 && shift == j)
+ Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
+ else
+ Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
+
+ if (splat) {
+ Ops[j-1] = EmitNeonSplat(Ops[j-1], cast<Constant>(Ops[j]));
+ Ops.resize(j);
+ }
+ return Builder.CreateCall(F, Ops.begin(), Ops.end(), name);
+}
+
+Value *CodeGenFunction::EmitNeonShiftVector(Value *V, const llvm::Type *Ty,
+ bool neg) {
+ ConstantInt *CI = cast<ConstantInt>(V);
+ int SV = CI->getSExtValue();
+
+ const llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
+ llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
+ SmallVector<llvm::Constant*, 16> CV(VTy->getNumElements(), C);
+ return llvm::ConstantVector::get(CV.begin(), CV.size());
+}
+
Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
+ if (BuiltinID == ARM::BI__clear_cache) {
+ const FunctionDecl *FD = E->getDirectCallee();
+ Value *a = EmitScalarExpr(E->getArg(0));
+ Value *b = EmitScalarExpr(E->getArg(1));
+ const llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
+ const llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
+ llvm::StringRef Name = FD->getName();
+ return Builder.CreateCall2(CGM.CreateRuntimeFunction(FTy, Name),
+ a, b);
+ }
+
+ // Determine the type of this overloaded NEON intrinsic.
+ assert(BuiltinID > ARM::BI__builtin_thread_pointer);
+
+ llvm::SmallVector<Value*, 4> Ops;
+ for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+ llvm::APSInt Result;
+ const Expr *Arg = E->getArg(E->getNumArgs()-1);
+ if (!Arg->isIntegerConstantExpr(Result, getContext()))
+ return 0;
+
+ unsigned type = Result.getZExtValue();
+ bool usgn = type & 0x08;
+ bool quad = type & 0x10;
+ bool poly = (type & 0x7) == 5 || (type & 0x7) == 6;
+ bool splat = false;
+
+ const llvm::VectorType *VTy = GetNeonType(VMContext, type & 0x7, quad);
+ const llvm::Type *Ty = VTy;
+ if (!Ty)
+ return 0;
+
+ unsigned Int;
switch (BuiltinID) {
default: return 0;
+ case ARM::BI__builtin_neon_vaba_v:
+ case ARM::BI__builtin_neon_vabaq_v:
+ Int = usgn ? Intrinsic::arm_neon_vabau : Intrinsic::arm_neon_vabas;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vaba");
+ case ARM::BI__builtin_neon_vabal_v:
+ Int = usgn ? Intrinsic::arm_neon_vabalu : Intrinsic::arm_neon_vabals;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vabal");
+ case ARM::BI__builtin_neon_vabd_v:
+ case ARM::BI__builtin_neon_vabdq_v:
+ Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vabd");
+ case ARM::BI__builtin_neon_vabdl_v:
+ Int = usgn ? Intrinsic::arm_neon_vabdlu : Intrinsic::arm_neon_vabdls;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vabdl");
+ case ARM::BI__builtin_neon_vabs_v:
+ case ARM::BI__builtin_neon_vabsq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, &Ty, 1),
+ Ops, "vabs");
+ case ARM::BI__builtin_neon_vaddhn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vaddhn, &Ty, 1),
+ Ops, "vaddhn");
+ case ARM::BI__builtin_neon_vaddl_v:
+ Int = usgn ? Intrinsic::arm_neon_vaddlu : Intrinsic::arm_neon_vaddls;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vaddl");
+ case ARM::BI__builtin_neon_vaddw_v:
+ Int = usgn ? Intrinsic::arm_neon_vaddws : Intrinsic::arm_neon_vaddwu;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vaddw");
+ case ARM::BI__builtin_neon_vcale_v:
+ std::swap(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vcage_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacged, &Ty, 1);
+ return EmitNeonCall(F, Ops, "vcage");
+ }
+ case ARM::BI__builtin_neon_vcaleq_v:
+ std::swap(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vcageq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq, &Ty, 1);
+ return EmitNeonCall(F, Ops, "vcage");
+ }
+ case ARM::BI__builtin_neon_vcalt_v:
+ std::swap(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vcagt_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtd, &Ty, 1);
+ return EmitNeonCall(F, Ops, "vcagt");
+ }
+ case ARM::BI__builtin_neon_vcaltq_v:
+ std::swap(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vcagtq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq, &Ty, 1);
+ return EmitNeonCall(F, Ops, "vcagt");
+ }
+ case ARM::BI__builtin_neon_vcls_v:
+ case ARM::BI__builtin_neon_vclsq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcls, &Ty, 1);
+ return EmitNeonCall(F, Ops, "vcls");
+ }
+ case ARM::BI__builtin_neon_vclz_v:
+ case ARM::BI__builtin_neon_vclzq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vclz, &Ty, 1);
+ return EmitNeonCall(F, Ops, "vclz");
+ }
+ case ARM::BI__builtin_neon_vcnt_v:
+ case ARM::BI__builtin_neon_vcntq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcnt, &Ty, 1);
+ return EmitNeonCall(F, Ops, "vcnt");
+ }
+ // FIXME: intrinsics for f16<->f32 convert missing from ARM target.
+ case ARM::BI__builtin_neon_vcvt_f32_v:
+ case ARM::BI__builtin_neon_vcvtq_f32_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ty = GetNeonType(VMContext, 4, quad);
+ return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
+ : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
+ }
+ case ARM::BI__builtin_neon_vcvt_s32_v:
+ case ARM::BI__builtin_neon_vcvt_u32_v:
+ case ARM::BI__builtin_neon_vcvtq_s32_v:
+ case ARM::BI__builtin_neon_vcvtq_u32_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(VMContext, 4, quad));
+ return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
+ : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
+ }
+ case ARM::BI__builtin_neon_vcvt_n_f32_v:
+ case ARM::BI__builtin_neon_vcvtq_n_f32_v: {
+ const llvm::Type *Tys[2] = { GetNeonType(VMContext, 4, quad), Ty };
+ Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp : Intrinsic::arm_neon_vcvtfxs2fp;
+ Function *F = CGM.getIntrinsic(Int, Tys, 2);
+ return EmitNeonCall(F, Ops, "vcvt_n");
+ }
+ case ARM::BI__builtin_neon_vcvt_n_s32_v:
+ case ARM::BI__builtin_neon_vcvt_n_u32_v:
+ case ARM::BI__builtin_neon_vcvtq_n_s32_v:
+ case ARM::BI__builtin_neon_vcvtq_n_u32_v: {
+ const llvm::Type *Tys[2] = { Ty, GetNeonType(VMContext, 4, quad) };
+ Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu : Intrinsic::arm_neon_vcvtfp2fxs;
+ Function *F = CGM.getIntrinsic(Int, Tys, 2);
+ return EmitNeonCall(F, Ops, "vcvt_n");
+ }
+ case ARM::BI__builtin_neon_vext_v:
+ case ARM::BI__builtin_neon_vextq_v: {
+ ConstantInt *C = dyn_cast<ConstantInt>(Ops[2]);
+ int CV = C->getSExtValue();
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+ Indices.push_back(ConstantInt::get(Int32Ty, i+CV));
+
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
+ }
+ case ARM::BI__builtin_neon_vget_lane_i8:
+ case ARM::BI__builtin_neon_vget_lane_i16:
+ case ARM::BI__builtin_neon_vget_lane_i32:
+ case ARM::BI__builtin_neon_vget_lane_i64:
+ case ARM::BI__builtin_neon_vget_lane_f32:
+ case ARM::BI__builtin_neon_vgetq_lane_i8:
+ case ARM::BI__builtin_neon_vgetq_lane_i16:
+ case ARM::BI__builtin_neon_vgetq_lane_i32:
+ case ARM::BI__builtin_neon_vgetq_lane_i64:
+ case ARM::BI__builtin_neon_vgetq_lane_f32:
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vget_lane");
+ case ARM::BI__builtin_neon_vhadd_v:
+ case ARM::BI__builtin_neon_vhaddq_v:
+ Int = usgn ? Intrinsic::arm_neon_vhaddu : Intrinsic::arm_neon_vhadds;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vhadd");
+ case ARM::BI__builtin_neon_vhsub_v:
+ case ARM::BI__builtin_neon_vhsubq_v:
+ Int = usgn ? Intrinsic::arm_neon_vhsubu : Intrinsic::arm_neon_vhsubs;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vhsub");
+ case ARM::BI__builtin_neon_vld1_v:
+ case ARM::BI__builtin_neon_vld1q_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, &Ty, 1),
+ Ops, "vld1");
+ case ARM::BI__builtin_neon_vld1_lane_v:
+ case ARM::BI__builtin_neon_vld1q_lane_v:
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ty = llvm::PointerType::getUnqual(VTy->getElementType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[0] = Builder.CreateLoad(Ops[0]);
+ return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
+ case ARM::BI__builtin_neon_vld1_dup_v:
+ case ARM::BI__builtin_neon_vld1q_dup_v: {
+ Value *V = UndefValue::get(Ty);
+ Ty = llvm::PointerType::getUnqual(VTy->getElementType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[0] = Builder.CreateLoad(Ops[0]);
+ llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
+ Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
+ return EmitNeonSplat(Ops[0], CI);
+ }
+ case ARM::BI__builtin_neon_vld2_v:
+ case ARM::BI__builtin_neon_vld2q_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, &Ty, 1);
+ Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld3_v:
+ case ARM::BI__builtin_neon_vld3q_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, &Ty, 1);
+ Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld4_v:
+ case ARM::BI__builtin_neon_vld4q_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, &Ty, 1);
+ Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld2_lane_v:
+ case ARM::BI__builtin_neon_vld2q_lane_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, &Ty, 1);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
+ Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld2_lane");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld3_lane_v:
+ case ARM::BI__builtin_neon_vld3q_lane_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3lane, &Ty, 1);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
+ Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
+ Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld3_lane");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld4_lane_v:
+ case ARM::BI__builtin_neon_vld4q_lane_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4lane, &Ty, 1);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
+ Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
+ Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
+ Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld3_lane");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld2_dup_v:
+ case ARM::BI__builtin_neon_vld3_dup_v:
+ case ARM::BI__builtin_neon_vld4_dup_v: {
+ switch (BuiltinID) {
+ case ARM::BI__builtin_neon_vld2_dup_v:
+ Int = Intrinsic::arm_neon_vld2lane;
+ break;
+ case ARM::BI__builtin_neon_vld3_dup_v:
+ Int = Intrinsic::arm_neon_vld2lane;
+ break;
+ case ARM::BI__builtin_neon_vld4_dup_v:
+ Int = Intrinsic::arm_neon_vld2lane;
+ break;
+ default: assert(0 && "unknown vld_dup intrinsic?");
+ }
+ Function *F = CGM.getIntrinsic(Int, &Ty, 1);
+ const llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
+
+ SmallVector<Value*, 6> Args;
+ Args.push_back(Ops[1]);
+ Args.append(STy->getNumElements(), UndefValue::get(Ty));
- case ARM::BI__builtin_thread_pointer: {
- Value *AtomF = CGM.getIntrinsic(Intrinsic::arm_thread_pointer, 0, 0);
- return Builder.CreateCall(AtomF);
+ llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
+ Args.push_back(CI);
+
+ Ops[1] = Builder.CreateCall(F, Args.begin(), Args.end(), "vld_dup");
+ // splat lane 0 to all elts in each vector of the result.
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ Value *Val = Builder.CreateExtractValue(Ops[1], i);
+ Value *Elt = Builder.CreateBitCast(Val, Ty);
+ Elt = EmitNeonSplat(Elt, CI);
+ Elt = Builder.CreateBitCast(Elt, Val->getType());
+ Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
+ }
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vmax_v:
+ case ARM::BI__builtin_neon_vmaxq_v:
+ Int = usgn ? Intrinsic::arm_neon_vmaxu : Intrinsic::arm_neon_vmaxs;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmax");
+ case ARM::BI__builtin_neon_vmin_v:
+ case ARM::BI__builtin_neon_vminq_v:
+ Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmin");
+ case ARM::BI__builtin_neon_vmlal_lane_v:
+ splat = true;
+ case ARM::BI__builtin_neon_vmlal_v:
+ Int = usgn ? Intrinsic::arm_neon_vmlalu : Intrinsic::arm_neon_vmlals;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmlal", splat);
+ case ARM::BI__builtin_neon_vmlsl_lane_v:
+ splat = true;
+ case ARM::BI__builtin_neon_vmlsl_v:
+ Int = usgn ? Intrinsic::arm_neon_vmlslu : Intrinsic::arm_neon_vmlsls;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmlsl", splat);
+ case ARM::BI__builtin_neon_vmovl_v:
+ Int = usgn ? Intrinsic::arm_neon_vmovlu : Intrinsic::arm_neon_vmovls;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmovl");
+ case ARM::BI__builtin_neon_vmovn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmovn, &Ty, 1),
+ Ops, "vmovn");
+ case ARM::BI__builtin_neon_vmull_lane_v:
+ splat = true;
+ case ARM::BI__builtin_neon_vmull_v:
+ Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
+ Int = poly ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmlal", splat);
+ case ARM::BI__builtin_neon_vpadal_v:
+ case ARM::BI__builtin_neon_vpadalq_v:
+ Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpadal");
+ case ARM::BI__builtin_neon_vpadd_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, &Ty, 1),
+ Ops, "vpadd");
+ case ARM::BI__builtin_neon_vpaddl_v:
+ case ARM::BI__builtin_neon_vpaddlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpaddl");
+ case ARM::BI__builtin_neon_vpmax_v:
+ Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpmax");
+ case ARM::BI__builtin_neon_vpmin_v:
+ Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpmin");
+ case ARM::BI__builtin_neon_vqabs_v:
+ case ARM::BI__builtin_neon_vqabsq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqabs, &Ty, 1),
+ Ops, "vqabs");
+ case ARM::BI__builtin_neon_vqadd_v:
+ case ARM::BI__builtin_neon_vqaddq_v:
+ Int = usgn ? Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqadd");
+ case ARM::BI__builtin_neon_vqdmlal_lane_v:
+ splat = true;
+ case ARM::BI__builtin_neon_vqdmlal_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlal, &Ty, 1),
+ Ops, "vqdmlal", splat);
+ case ARM::BI__builtin_neon_vqdmlsl_lane_v:
+ splat = true;
+ case ARM::BI__builtin_neon_vqdmlsl_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlsl, &Ty, 1),
+ Ops, "vqdmlsl", splat);
+ case ARM::BI__builtin_neon_vqdmulh_lane_v:
+ case ARM::BI__builtin_neon_vqdmulhq_lane_v:
+ splat = true;
+ case ARM::BI__builtin_neon_vqdmulh_v:
+ case ARM::BI__builtin_neon_vqdmulhq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, &Ty, 1),
+ Ops, "vqdmulh", splat);
+ case ARM::BI__builtin_neon_vqdmull_lane_v:
+ splat = true;
+ case ARM::BI__builtin_neon_vqdmull_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, &Ty, 1),
+ Ops, "vqdmull", splat);
+ case ARM::BI__builtin_neon_vqmovn_v:
+ Int = usgn ? Intrinsic::arm_neon_vqmovnu : Intrinsic::arm_neon_vqmovns;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqmovn");
+ case ARM::BI__builtin_neon_vqmovun_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, &Ty, 1),
+ Ops, "vqdmull");
+ case ARM::BI__builtin_neon_vqneg_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, &Ty, 1),
+ Ops, "vqneg");
+ case ARM::BI__builtin_neon_vqrdmulh_lane_v:
+ case ARM::BI__builtin_neon_vqrdmulhq_lane_v:
+ splat = true;
+ case ARM::BI__builtin_neon_vqrdmulh_v:
+ case ARM::BI__builtin_neon_vqrdmulhq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, &Ty, 1),
+ Ops, "vqrdmulh", splat);
+ case ARM::BI__builtin_neon_vqrshl_v:
+ case ARM::BI__builtin_neon_vqrshlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshl");
+ case ARM::BI__builtin_neon_vqrshrn_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshrn_n", false,
+ 1, true);
+ case ARM::BI__builtin_neon_vqrshrun_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, &Ty, 1),
+ Ops, "vqrshrun_n", false, 1, true);
+ case ARM::BI__builtin_neon_vqshl_v:
+ case ARM::BI__builtin_neon_vqshlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl");
+ case ARM::BI__builtin_neon_vqshl_n_v:
+ case ARM::BI__builtin_neon_vqshlq_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl_n", false,
+ 1, false);
+ case ARM::BI__builtin_neon_vqshlu_n_v:
+ case ARM::BI__builtin_neon_vqshluq_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, &Ty, 1),
+ Ops, "vqshlu", 1, false);
+ case ARM::BI__builtin_neon_vqshrn_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshrn_n", false,
+ 1, true);
+ case ARM::BI__builtin_neon_vqshrun_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, &Ty, 1),
+ Ops, "vqshrun_n", false, 1, true);
+ case ARM::BI__builtin_neon_vqsub_v:
+ case ARM::BI__builtin_neon_vqsubq_v:
+ Int = usgn ? Intrinsic::arm_neon_vqsubu : Intrinsic::arm_neon_vqsubs;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqsub");
+ case ARM::BI__builtin_neon_vraddhn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vraddhn, &Ty, 1),
+ Ops, "vraddhn");
+ case ARM::BI__builtin_neon_vrecpe_v:
+ case ARM::BI__builtin_neon_vrecpeq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, &Ty, 1),
+ Ops, "vrecpe");
+ case ARM::BI__builtin_neon_vrecps_v:
+ case ARM::BI__builtin_neon_vrecpsq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecps, &Ty, 1),
+ Ops, "vrecps");
+ case ARM::BI__builtin_neon_vrhadd_v:
+ case ARM::BI__builtin_neon_vrhaddq_v:
+ Int = usgn ? Intrinsic::arm_neon_vrhaddu : Intrinsic::arm_neon_vrhadds;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrhadd");
+ case ARM::BI__builtin_neon_vrshl_v:
+ case ARM::BI__builtin_neon_vrshlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshl");
+ case ARM::BI__builtin_neon_vrshrn_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, &Ty, 1),
+ Ops, "vrshrn_n", false, 1, true);
+ case ARM::BI__builtin_neon_vrshr_n_v:
+ case ARM::BI__builtin_neon_vrshrq_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshr_n", false,
+ 1, true);
+ case ARM::BI__builtin_neon_vrsqrte_v:
+ case ARM::BI__builtin_neon_vrsqrteq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, &Ty, 1),
+ Ops, "vrsqrte");
+ case ARM::BI__builtin_neon_vrsqrts_v:
+ case ARM::BI__builtin_neon_vrsqrtsq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrts, &Ty, 1),
+ Ops, "vrsqrts");
+ case ARM::BI__builtin_neon_vrsra_n_v:
+ case ARM::BI__builtin_neon_vrsraq_n_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
+ Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
+ Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, &Ty, 1), Ops[1], Ops[2]);
+ return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
+ case ARM::BI__builtin_neon_vrsubhn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, &Ty, 1),
+ Ops, "vrsubhn");
+ case ARM::BI__builtin_neon_vset_lane_i8:
+ case ARM::BI__builtin_neon_vset_lane_i16:
+ case ARM::BI__builtin_neon_vset_lane_i32:
+ case ARM::BI__builtin_neon_vset_lane_i64:
+ case ARM::BI__builtin_neon_vset_lane_f32:
+ case ARM::BI__builtin_neon_vsetq_lane_i8:
+ case ARM::BI__builtin_neon_vsetq_lane_i16:
+ case ARM::BI__builtin_neon_vsetq_lane_i32:
+ case ARM::BI__builtin_neon_vsetq_lane_i64:
+ case ARM::BI__builtin_neon_vsetq_lane_f32:
+ Ops.push_back(EmitScalarExpr(E->getArg(2)));
+ return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
+ case ARM::BI__builtin_neon_vshl_v:
+ case ARM::BI__builtin_neon_vshlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshl");
+ case ARM::BI__builtin_neon_vshll_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vshiftlu : Intrinsic::arm_neon_vshiftls;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshll", false, 1);
+ case ARM::BI__builtin_neon_vshl_n_v:
+ case ARM::BI__builtin_neon_vshlq_n_v:
+ Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
+ return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1], "vshl_n");
+ case ARM::BI__builtin_neon_vshrn_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, &Ty, 1),
+ Ops, "vshrn_n", false, 1, true);
+ case ARM::BI__builtin_neon_vshr_n_v:
+ case ARM::BI__builtin_neon_vshrq_n_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
+ if (usgn)
+ return Builder.CreateLShr(Ops[0], Ops[1], "vshr_n");
+ else
+ return Builder.CreateAShr(Ops[0], Ops[1], "vshr_n");
+ case ARM::BI__builtin_neon_vsri_n_v:
+ case ARM::BI__builtin_neon_vsriq_n_v:
+ poly = true;
+ case ARM::BI__builtin_neon_vsli_n_v:
+ case ARM::BI__builtin_neon_vsliq_n_v:
+ Ops[2] = EmitNeonShiftVector(Ops[2], Ty, poly);
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, &Ty, 1),
+ Ops, "vsli_n");
+ case ARM::BI__builtin_neon_vsra_n_v:
+ case ARM::BI__builtin_neon_vsraq_n_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = EmitNeonShiftVector(Ops[2], Ty, false);
+ if (usgn)
+ Ops[1] = Builder.CreateLShr(Ops[1], Ops[2], "vsra_n");
+ else
+ Ops[1] = Builder.CreateAShr(Ops[1], Ops[2], "vsra_n");
+ return Builder.CreateAdd(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vst1_v:
+ case ARM::BI__builtin_neon_vst1q_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, &Ty, 1),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst1_lane_v:
+ case ARM::BI__builtin_neon_vst1q_lane_v:
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ return Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty));
+ case ARM::BI__builtin_neon_vst2_v:
+ case ARM::BI__builtin_neon_vst2q_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, &Ty, 1),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst2_lane_v:
+ case ARM::BI__builtin_neon_vst2q_lane_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, &Ty, 1),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst3_v:
+ case ARM::BI__builtin_neon_vst3q_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, &Ty, 1),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst3_lane_v:
+ case ARM::BI__builtin_neon_vst3q_lane_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, &Ty, 1),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst4_v:
+ case ARM::BI__builtin_neon_vst4q_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, &Ty, 1),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst4_lane_v:
+ case ARM::BI__builtin_neon_vst4q_lane_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, &Ty, 1),
+ Ops, "");
+ case ARM::BI__builtin_neon_vsubhn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vsubhn, &Ty, 1),
+ Ops, "vsubhn");
+ case ARM::BI__builtin_neon_vsubl_v:
+ Int = usgn ? Intrinsic::arm_neon_vsublu : Intrinsic::arm_neon_vsubls;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vsubl");
+ case ARM::BI__builtin_neon_vsubw_v:
+    Int = usgn ? Intrinsic::arm_neon_vsubwu : Intrinsic::arm_neon_vsubws;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vsubw");
+ case ARM::BI__builtin_neon_vtbl1_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
+ Ops, "vtbl1");
+ case ARM::BI__builtin_neon_vtbl2_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
+ Ops, "vtbl2");
+ case ARM::BI__builtin_neon_vtbl3_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
+ Ops, "vtbl3");
+ case ARM::BI__builtin_neon_vtbl4_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
+ Ops, "vtbl4");
+ case ARM::BI__builtin_neon_vtbx1_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
+ Ops, "vtbx1");
+ case ARM::BI__builtin_neon_vtbx2_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
+ Ops, "vtbx2");
+ case ARM::BI__builtin_neon_vtbx3_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
+ Ops, "vtbx3");
+ case ARM::BI__builtin_neon_vtbx4_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
+ Ops, "vtbx4");
+ case ARM::BI__builtin_neon_vtst_v:
+ case ARM::BI__builtin_neon_vtstq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
+ Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
+ ConstantAggregateZero::get(Ty));
+ return Builder.CreateSExt(Ops[0], Ty, "vtst");
+ }
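
The vtst lowering above needs no dedicated intrinsic: it ANDs the two
operands, compares the result against zero, and sign-extends the i1 mask back
to the element width. A per-lane model in plain C++ (illustrative only; the
emitted IR operates on whole vectors):

    #include <cstdint>
    // Per-lane semantics of NEON vtst: all-ones if (a & b) has any set bit.
    static int32_t vtst_lane(int32_t a, int32_t b) {
      return (a & b) != 0 ? -1 : 0;  // sext(i1) gives 0 or all-ones
    }
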
+ case ARM::BI__builtin_neon_vtrn_v:
+ case ARM::BI__builtin_neon_vtrnq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Value *SV;
+
+ for (unsigned vi = 0; vi != 2; ++vi) {
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+ Indices.push_back(ConstantInt::get(Int32Ty, i+vi));
+ Indices.push_back(ConstantInt::get(Int32Ty, i+e+vi));
+ }
+ Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
+ SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
+ SV = Builder.CreateStore(SV, Addr);
+ }
+ return SV;
+ }
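
The vtrn case above stores two shuffle results, one per output register; the
mask interleaves the even (vi == 0) or odd (vi == 1) lanes of the two inputs.
A standalone sketch that reproduces the index computation for a 4-element
vector:

    #include <cstdio>
    // Reproduces the vtrn shuffle-mask loop above for e == 4 elements.
    int main() {
      const unsigned e = 4;
      for (unsigned vi = 0; vi != 2; ++vi) {
        std::printf("vtrn.%u mask:", vi);
        for (unsigned i = 0; i != e; i += 2)
          std::printf(" %u %u", i + vi, i + e + vi);  // 0 4 2 6 / 1 5 3 7
        std::printf("\n");
      }
      return 0;
    }
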
+ case ARM::BI__builtin_neon_vuzp_v:
+ case ARM::BI__builtin_neon_vuzpq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Value *SV;
+
+ for (unsigned vi = 0; vi != 2; ++vi) {
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+ Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));
+
+ Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
+ SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
+ SV = Builder.CreateStore(SV, Addr);
+ }
+ return SV;
+ }
+ case ARM::BI__builtin_neon_vzip_v:
+ case ARM::BI__builtin_neon_vzipq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Value *SV;
+
+ for (unsigned vi = 0; vi != 2; ++vi) {
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+ Indices.push_back(ConstantInt::get(Int32Ty, (i >> 1)));
+ Indices.push_back(ConstantInt::get(Int32Ty, (i >> 1)+e));
+ }
+ Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
+ SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
+ SV = Builder.CreateStore(SV, Addr);
+ }
+ return SV;
}
}
}
@@ -900,9 +1657,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_psrldi128:
case X86::BI__builtin_ia32_psrlqi128:
case X86::BI__builtin_ia32_psrlwi128: {
- Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext), "zext");
- const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 2);
- llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+ Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
+ const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
+ llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
Ops[1], Zero, "insert");
Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast");
@@ -955,8 +1712,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_psrldi:
case X86::BI__builtin_ia32_psrlqi:
case X86::BI__builtin_ia32_psrlwi: {
- Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext), "zext");
- const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 1);
+ Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
+ const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 1);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
const char *name = 0;
Intrinsic::ID ID = Intrinsic::not_intrinsic;
@@ -1009,16 +1766,16 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
case X86::BI__builtin_ia32_ldmxcsr: {
const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
- Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1);
- Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One, "tmp");
+ Value *One = llvm::ConstantInt::get(Int32Ty, 1);
+ Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
Builder.CreateStore(Ops[0], Tmp);
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
Builder.CreateBitCast(Tmp, PtrTy));
}
case X86::BI__builtin_ia32_stmxcsr: {
const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
- Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1);
- Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One, "tmp");
+ Value *One = llvm::ConstantInt::get(Int32Ty, 1);
+ Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
Builder.CreateBitCast(Tmp, PtrTy));
return Builder.CreateLoad(Tmp, "stmxcsr");
@@ -1033,16 +1790,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
case X86::BI__builtin_ia32_storehps:
case X86::BI__builtin_ia32_storelps: {
- const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
- llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy);
- llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
+ llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
+ llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
// cast val v2i64
Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
// extract (0, 1)
unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
- llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Index);
+ llvm::Value *Idx = llvm::ConstantInt::get(Int32Ty, Index);
Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");
// cast pointer to i64 & store
@@ -1055,11 +1811,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// If palignr is shifting the pair of input vectors less than 9 bytes,
// emit a shuffle instruction.
if (shiftVal <= 8) {
- const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);
-
llvm::SmallVector<llvm::Constant*, 8> Indices;
for (unsigned i = 0; i != 8; ++i)
- Indices.push_back(llvm::ConstantInt::get(IntTy, shiftVal + i));
+ Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
@@ -1069,8 +1823,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// than 16 bytes, emit a logical right shift of the destination.
if (shiftVal < 16) {
// MMX has these as 1 x i64 vectors for some odd optimization reasons.
- const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
- const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 1);
+ const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);
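
The arithmetic above deserves a worked example: for 8 < shiftVal < 16 the
second source has been shifted out entirely, so only bytes of Ops[0] remain,
shifted right by (shiftVal - 8) bytes, expressed in bits. A one-line sketch of
the conversion:

    // Byte-to-bit conversion used above; e.g. shiftVal == 11 -> 24-bit shift.
    static unsigned palignrShiftBits(unsigned shiftVal) {
      return (shiftVal - 8) * 8;
    }
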
@@ -1089,11 +1842,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// If palignr is shifting the pair of input vectors less than 17 bytes,
// emit a shuffle instruction.
if (shiftVal <= 16) {
- const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);
-
llvm::SmallVector<llvm::Constant*, 16> Indices;
for (unsigned i = 0; i != 16; ++i)
- Indices.push_back(llvm::ConstantInt::get(IntTy, shiftVal + i));
+ Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
@@ -1102,12 +1853,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// If palignr is shifting the pair of input vectors more than 16 but less
// than 32 bytes, emit a logical right shift of the destination.
if (shiftVal < 32) {
- const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
- const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
- const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);
+ const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
- Ops[1] = llvm::ConstantInt::get(IntTy, (shiftVal-16) * 8);
+ Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);
// create i32 constant
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
@@ -1132,6 +1881,48 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
switch (BuiltinID) {
default: return 0;
+ // vec_ld, vec_lvsl, vec_lvsr
+ case PPC::BI__builtin_altivec_lvx:
+ case PPC::BI__builtin_altivec_lvxl:
+ case PPC::BI__builtin_altivec_lvebx:
+ case PPC::BI__builtin_altivec_lvehx:
+ case PPC::BI__builtin_altivec_lvewx:
+ case PPC::BI__builtin_altivec_lvsl:
+ case PPC::BI__builtin_altivec_lvsr:
+ {
+ Ops[1] = Builder.CreateBitCast(Ops[1], llvm::Type::getInt8PtrTy(VMContext));
+
+ Ops[0] = Builder.CreateGEP(Ops[1], Ops[0], "tmp");
+ Ops.pop_back();
+
+ switch (BuiltinID) {
+ default: assert(0 && "Unsupported ld/lvsl/lvsr intrinsic!");
+ case PPC::BI__builtin_altivec_lvx:
+ ID = Intrinsic::ppc_altivec_lvx;
+ break;
+ case PPC::BI__builtin_altivec_lvxl:
+ ID = Intrinsic::ppc_altivec_lvxl;
+ break;
+ case PPC::BI__builtin_altivec_lvebx:
+ ID = Intrinsic::ppc_altivec_lvebx;
+ break;
+ case PPC::BI__builtin_altivec_lvehx:
+ ID = Intrinsic::ppc_altivec_lvehx;
+ break;
+ case PPC::BI__builtin_altivec_lvewx:
+ ID = Intrinsic::ppc_altivec_lvewx;
+ break;
+ case PPC::BI__builtin_altivec_lvsl:
+ ID = Intrinsic::ppc_altivec_lvsl;
+ break;
+ case PPC::BI__builtin_altivec_lvsr:
+ ID = Intrinsic::ppc_altivec_lvsr;
+ break;
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "");
+ }
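
All seven AltiVec load builtins above share one lowering: fold the byte
offset into the pointer with an i8 GEP, drop the offset operand, and call the
matching llvm.ppc.altivec.* intrinsic. At the source level this corresponds
to calls like the following (sketch; assumes an AltiVec-enabled PowerPC
target):

    // vec_ld(off, p) from <altivec.h> maps onto __builtin_altivec_lvx.
    vector int load16(int off, const int *p) {
      return __builtin_altivec_lvx(off, p);
    }
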
+
// vec_st
case PPC::BI__builtin_altivec_stvx:
case PPC::BI__builtin_altivec_stvxl:
@@ -1140,12 +1931,11 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_altivec_stvewx:
{
Ops[2] = Builder.CreateBitCast(Ops[2], llvm::Type::getInt8PtrTy(VMContext));
- Ops[1] = !isa<Constant>(Ops[1]) || !cast<Constant>(Ops[1])->isNullValue()
- ? Builder.CreateGEP(Ops[2], Ops[1], "tmp") : Ops[2];
+ Ops[1] = Builder.CreateGEP(Ops[2], Ops[1], "tmp");
Ops.pop_back();
switch (BuiltinID) {
- default: assert(0 && "Unsupported vavg intrinsic!");
+ default: assert(0 && "Unsupported st intrinsic!");
case PPC::BI__builtin_altivec_stvx:
ID = Intrinsic::ppc_altivec_stvx;
break;
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
index 5258779..7b7be9a 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
@@ -23,7 +23,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtCXX.h"
-#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
using namespace clang;
using namespace CodeGen;
@@ -97,8 +97,8 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
/// If we don't have a definition for the destructor yet, don't
/// emit. We can't emit aliases to declarations; that's just not
/// how aliases work.
- const CXXDestructorDecl *BaseD = UniqueBase->getDestructor(getContext());
- if (!BaseD->isImplicit() && !BaseD->getBody())
+ const CXXDestructorDecl *BaseD = UniqueBase->getDestructor();
+ if (!BaseD->isImplicit() && !BaseD->hasBody())
return true;
// If the base is at a non-zero offset, give up.
@@ -166,8 +166,7 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
new llvm::GlobalAlias(AliasType, Linkage, "", Aliasee, &getModule());
// Switch any previous uses to the alias.
- MangleBuffer MangledName;
- getMangledName(MangledName, AliasDecl);
+ llvm::StringRef MangledName = getMangledName(AliasDecl);
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry) {
assert(Entry->isDeclaration() && "definition already exists for alias");
@@ -177,7 +176,7 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
Entry->replaceAllUsesWith(Alias);
Entry->eraseFromParent();
} else {
- Alias->setName(MangledName.getString());
+ Alias->setName(MangledName);
}
// Finally, set up the alias with its proper name and attributes.
@@ -218,8 +217,9 @@ void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *D,
llvm::GlobalValue *
CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
CXXCtorType Type) {
- MangleBuffer Name;
- getMangledCXXCtorName(Name, D, Type);
+ GlobalDecl GD(D, Type);
+
+ llvm::StringRef Name = getMangledName(GD);
if (llvm::GlobalValue *V = GetGlobalValue(Name))
return V;
@@ -227,18 +227,7 @@ CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
const llvm::FunctionType *FTy =
getTypes().GetFunctionType(getTypes().getFunctionInfo(D, Type),
FPT->isVariadic());
- return cast<llvm::Function>(
- GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type)));
-}
-
-void CodeGenModule::getMangledName(MangleBuffer &Buffer, const BlockDecl *BD) {
- getMangleContext().mangleBlock(BD, Buffer.getBuffer());
-}
-
-void CodeGenModule::getMangledCXXCtorName(MangleBuffer &Name,
- const CXXConstructorDecl *D,
- CXXCtorType Type) {
- getMangleContext().mangleCXXCtor(D, Type, Name.getBuffer());
+ return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FTy, GD));
}
void CodeGenModule::EmitCXXDestructors(const CXXDestructorDecl *D) {
@@ -286,22 +275,54 @@ void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *D,
llvm::GlobalValue *
CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
CXXDtorType Type) {
- MangleBuffer Name;
- getMangledCXXDtorName(Name, D, Type);
+ GlobalDecl GD(D, Type);
+
+ llvm::StringRef Name = getMangledName(GD);
if (llvm::GlobalValue *V = GetGlobalValue(Name))
return V;
const llvm::FunctionType *FTy =
getTypes().GetFunctionType(getTypes().getFunctionInfo(D, Type), false);
- return cast<llvm::Function>(
- GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type)));
+ return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FTy, GD));
}
-void CodeGenModule::getMangledCXXDtorName(MangleBuffer &Name,
- const CXXDestructorDecl *D,
- CXXDtorType Type) {
- getMangleContext().mangleCXXDtor(D, Type, Name.getBuffer());
+llvm::Constant *
+CodeGenModule::GetCXXMemberFunctionPointerValue(const CXXMethodDecl *MD) {
+ assert(MD->isInstance() && "Member function must not be static!");
+
+ MD = MD->getCanonicalDecl();
+
+ const llvm::Type *PtrDiffTy = Types.ConvertType(Context.getPointerDiffType());
+
+ // Get the function pointer (or index if this is a virtual function).
+ if (MD->isVirtual()) {
+ uint64_t Index = VTables.getMethodVTableIndex(MD);
+
+ // FIXME: We shouldn't use / 8 here.
+ uint64_t PointerWidthInBytes = Context.Target.getPointerWidth(0) / 8;
+
+ // Itanium C++ ABI 2.3:
+ // For a non-virtual function, this field is a simple function pointer.
+ // For a virtual function, it is 1 plus the virtual table offset
+ // (in bytes) of the function, represented as a ptrdiff_t.
+ return llvm::ConstantInt::get(PtrDiffTy, (Index * PointerWidthInBytes) + 1);
+ }
+
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const llvm::Type *Ty;
+ // Check whether the function has a computable LLVM signature.
+ if (!CodeGenTypes::VerifyFuncTypeComplete(FPT)) {
+ // The function has a computable LLVM signature; use the correct type.
+ Ty = Types.GetFunctionType(Types.getFunctionInfo(MD), FPT->isVariadic());
+ } else {
+ // Use an arbitrary non-function type to tell GetAddrOfFunction that the
+ // function type is incomplete.
+ Ty = PtrDiffTy;
+ }
+
+ llvm::Constant *FuncPtr = GetAddrOfFunction(MD, Ty);
+ return llvm::ConstantExpr::getPtrToInt(FuncPtr, PtrDiffTy);
}
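
A worked example of the Itanium encoding implemented above: with 8-byte
pointers, a virtual function at vtable index 2 is stored as 2 * 8 + 1 = 17,
an odd value the call lowering can tell apart from a real function address.
A sketch of the arithmetic (values are illustrative):

    #include <cstddef>
    #include <cstdint>
    // Itanium C++ ABI: the member-pointer value for a virtual function is
    // 1 + (vtable offset in bytes), represented as ptrdiff_t.
    static std::ptrdiff_t itaniumVirtualMemPtr(std::uint64_t Index,
                                               std::uint64_t PtrBytes) {
      return static_cast<std::ptrdiff_t>(Index * PtrBytes + 1);  // 2, 8 -> 17
    }
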
static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VTableIndex,
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
index a7e1871..e1bbb0a 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
@@ -31,6 +31,7 @@ public:
/// Creates an instance of a C++ ABI class.
CXXABI *CreateItaniumCXXABI(CodeGenModule &CGM);
+CXXABI *CreateMicrosoftCXXABI(CodeGenModule &CGM);
}
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
index 73cee3c..3d1e143 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
@@ -13,26 +13,22 @@
//===----------------------------------------------------------------------===//
#include "CGCall.h"
+#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
-#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
-
-#include "ABIInfo.h"
-
using namespace clang;
using namespace CodeGen;
/***/
-// FIXME: Use iterator and sidestep silly type array creation.
-
static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
switch (CC) {
default: return llvm::CallingConv::C;
@@ -65,29 +61,31 @@ static CanQualType GetReturnType(QualType RetTy) {
}
const CGFunctionInfo &
-CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) {
+CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP,
+ bool IsRecursive) {
return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
llvm::SmallVector<CanQualType, 16>(),
- FTNP->getExtInfo());
+ FTNP->getExtInfo(), IsRecursive);
}
/// \param Args - contains any initial parameters besides those
/// in the formal type
static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
llvm::SmallVectorImpl<CanQualType> &ArgTys,
- CanQual<FunctionProtoType> FTP) {
+ CanQual<FunctionProtoType> FTP,
+ bool IsRecursive = false) {
// FIXME: Kill copy.
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
ArgTys.push_back(FTP->getArgType(i));
CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
- return CGT.getFunctionInfo(ResTy, ArgTys,
- FTP->getExtInfo());
+ return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo(), IsRecursive);
}
const CGFunctionInfo &
-CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) {
+CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP,
+ bool IsRecursive) {
llvm::SmallVector<CanQualType, 16> ArgTys;
- return ::getFunctionInfo(*this, ArgTys, FTP);
+ return ::getFunctionInfo(*this, ArgTys, FTP, IsRecursive);
}
static CallingConv getCallingConventionForDecl(const Decl *D) {
@@ -220,7 +218,8 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
const llvm::SmallVectorImpl<CanQualType> &ArgTys,
- const FunctionType::ExtInfo &Info) {
+ const FunctionType::ExtInfo &Info,
+ bool IsRecursive) {
#ifndef NDEBUG
for (llvm::SmallVectorImpl<CanQualType>::const_iterator
I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
@@ -240,35 +239,65 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
return *FI;
// Construct the function info.
- FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(), ResTy, ArgTys);
+ FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(), ResTy,
+ ArgTys.data(), ArgTys.size());
FunctionInfos.InsertNode(FI, InsertPos);
+  // ABI lowering wants to know what our preferred type for the argument is
+  // in various situations, so pass it in.
+ llvm::SmallVector<const llvm::Type *, 8> PreferredArgTypes;
+ for (llvm::SmallVectorImpl<CanQualType>::const_iterator
+ I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I) {
+ // If this is being called from the guts of the ConvertType loop, make sure
+ // to call ConvertTypeRecursive so we don't get into issues with cyclic
+ // pointer type structures.
+ PreferredArgTypes.push_back(ConvertTypeRecursive(*I));
+ }
+
// Compute ABI information.
- getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext());
-
+ getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext(),
+ PreferredArgTypes.data(), PreferredArgTypes.size());
+
+ // If this is a top-level call and ConvertTypeRecursive hit unresolved pointer
+ // types, resolve them now. These pointers may point to this function, which
+ // we *just* filled in the FunctionInfo for.
+ if (!IsRecursive && !PointersToResolve.empty()) {
+ // Use PATypeHolder's so that our preferred types don't dangle under
+ // refinement.
+ llvm::SmallVector<llvm::PATypeHolder, 8> Handles(PreferredArgTypes.begin(),
+ PreferredArgTypes.end());
+ HandleLateResolvedPointers();
+ PreferredArgTypes.clear();
+ PreferredArgTypes.append(Handles.begin(), Handles.end());
+ }
+
return *FI;
}
CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
- bool _NoReturn,
- unsigned _RegParm,
+ bool _NoReturn, unsigned _RegParm,
CanQualType ResTy,
- const llvm::SmallVectorImpl<CanQualType> &ArgTys)
+ const CanQualType *ArgTys,
+ unsigned NumArgTys)
: CallingConvention(_CallingConvention),
EffectiveCallingConvention(_CallingConvention),
NoReturn(_NoReturn), RegParm(_RegParm)
{
- NumArgs = ArgTys.size();
- Args = new ArgInfo[1 + NumArgs];
+ NumArgs = NumArgTys;
+
+  // FIXME: Co-allocate with the CGFunctionInfo object.
+ Args = new ArgInfo[1 + NumArgTys];
Args[0].type = ResTy;
- for (unsigned i = 0; i < NumArgs; ++i)
+ for (unsigned i = 0; i != NumArgTys; ++i)
Args[1 + i].type = ArgTys[i];
}
/***/
void CodeGenTypes::GetExpandedTypes(QualType Ty,
- std::vector<const llvm::Type*> &ArgTys) {
+ std::vector<const llvm::Type*> &ArgTys,
+ bool IsRecursive) {
const RecordType *RT = Ty->getAsStructureType();
assert(RT && "Can only expand structure types.");
const RecordDecl *RD = RT->getDecl();
@@ -283,9 +312,9 @@ void CodeGenTypes::GetExpandedTypes(QualType Ty,
QualType FT = FD->getType();
if (CodeGenFunction::hasAggregateLLVMType(FT)) {
- GetExpandedTypes(FT, ArgTys);
+ GetExpandedTypes(FT, ArgTys, IsRecursive);
} else {
- ArgTys.push_back(ConvertType(FT));
+ ArgTys.push_back(ConvertType(FT, IsRecursive));
}
}
}
@@ -345,6 +374,71 @@ CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
}
}
+/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
+/// accessing some number of bytes out of it, try to gep into the struct to get
+/// at its inner goodness. Dive as deep as possible without entering an element
+/// with an in-memory size smaller than DstSize.
+static llvm::Value *
+EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
+ const llvm::StructType *SrcSTy,
+ uint64_t DstSize, CodeGenFunction &CGF) {
+ // We can't dive into a zero-element struct.
+ if (SrcSTy->getNumElements() == 0) return SrcPtr;
+
+ const llvm::Type *FirstElt = SrcSTy->getElementType(0);
+
+ // If the first elt is at least as large as what we're looking for, or if the
+ // first element is the same size as the whole struct, we can enter it.
+ uint64_t FirstEltSize =
+ CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
+ if (FirstEltSize < DstSize &&
+ FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
+ return SrcPtr;
+
+ // GEP into the first element.
+ SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
+
+ // If the first element is a struct, recurse.
+ const llvm::Type *SrcTy =
+ cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
+ return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
+
+ return SrcPtr;
+}
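
Concretely, the dive lets a 4-byte coerced access to a nested struct land
directly on the innermost scalar instead of the outer aggregate. A
source-level picture of the GEP chain it emits (types are illustrative):

    // With DstSize == 4 the helper steps Outer* -> Inner* -> int*,
    // i.e. it computes the address of o->a.x.
    struct Inner { int x; };
    struct Outer { Inner a; int b; };
    static int *diveTo4Bytes(Outer *o) { return &o->a.x; }
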
+
+/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
+/// are either integers or pointers. This does a truncation of the value if it
+/// is too large or a zero extension if it is too small.
+static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
+ const llvm::Type *Ty,
+ CodeGenFunction &CGF) {
+ if (Val->getType() == Ty)
+ return Val;
+
+ if (isa<llvm::PointerType>(Val->getType())) {
+ // If this is Pointer->Pointer avoid conversion to and from int.
+ if (isa<llvm::PointerType>(Ty))
+ return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
+
+ // Convert the pointer to an integer so we can play with its width.
+ Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
+ }
+
+ const llvm::Type *DestIntTy = Ty;
+ if (isa<llvm::PointerType>(DestIntTy))
+ DestIntTy = CGF.IntPtrTy;
+
+ if (Val->getType() != DestIntTy)
+ Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
+
+ if (isa<llvm::PointerType>(Ty))
+ Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
+ return Val;
+}
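
The helper reduces every int/pointer pairing to at most three casts: ptrtoint
to the target's intptr width, an integer width change, and inttoptr if the
destination is a pointer. A scalar C++ model of two of the cases
(illustrative; widths assume a 64-bit target):

    #include <cstdint>
    // int -> int: an unsigned cast zero-extends or truncates, like the IR.
    static std::uint32_t coerceI64ToI32(std::uint64_t V) {
      return static_cast<std::uint32_t>(V);
    }
    // ptr -> int: go through the pointer-sized integer first (ptrtoint),
    // then adjust the width.
    static std::uint64_t coercePtrToI64(void *P) {
      return static_cast<std::uint64_t>(reinterpret_cast<std::uintptr_t>(P));
    }
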
+
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
@@ -356,9 +450,28 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
CodeGenFunction &CGF) {
const llvm::Type *SrcTy =
cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
- uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+
+ // If SrcTy and Ty are the same, just do a load.
+ if (SrcTy == Ty)
+ return CGF.Builder.CreateLoad(SrcPtr);
+
uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
+
+ if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
+ SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
+ SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ }
+
+ uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+ // If the source and destination are integer or pointer types, just do an
+ // extension or truncation to the desired type.
+ if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
+ (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
+ return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
+ }
+
// If load is legal, just bitcast the src pointer.
if (SrcSize >= DstSize) {
// Generally SrcSize is never greater than DstSize, since this means we are
@@ -373,18 +486,18 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
// FIXME: Use better alignment / avoid requiring aligned load.
Load->setAlignment(1);
return Load;
- } else {
- // Otherwise do coercion through memory. This is stupid, but
- // simple.
- llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
- llvm::Value *Casted =
- CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
- llvm::StoreInst *Store =
- CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
- // FIXME: Use better alignment / avoid requiring aligned store.
- Store->setAlignment(1);
- return CGF.Builder.CreateLoad(Tmp);
}
+
+ // Otherwise do coercion through memory. This is stupid, but
+ // simple.
+ llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
+ llvm::StoreInst *Store =
+ CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
+ // FIXME: Use better alignment / avoid requiring aligned store.
+ Store->setAlignment(1);
+ return CGF.Builder.CreateLoad(Tmp);
}
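
When neither the direct load nor the int/pointer path applies, the fallback
above round-trips the value through a stack temporary and reinterprets the
bytes. The C++ analogue is a memcpy-style type pun (a sketch, not the emitted
IR):

    #include <cstring>
    // Reinterpret Src's bytes as Dst, copying the smaller of the two sizes.
    template <typename Dst, typename Src>
    static Dst coerceThroughMemory(Src V) {
      Dst Out = Dst();
      std::memcpy(&Out, &V,
                  sizeof(Src) < sizeof(Dst) ? sizeof(Src) : sizeof(Dst));
      return Out;
    }
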
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
@@ -399,8 +512,27 @@ static void CreateCoercedStore(llvm::Value *Src,
const llvm::Type *SrcTy = Src->getType();
const llvm::Type *DstTy =
cast<llvm::PointerType>(DstPtr->getType())->getElementType();
-
+ if (SrcTy == DstTy) {
+ CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
+ return;
+ }
+
uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+
+ if (const llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
+ DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
+ DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
+ }
+
+ // If the source and destination are integer or pointer types, just do an
+ // extension or truncation to the desired type.
+ if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
+ (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
+ Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
+ CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
+ return;
+ }
+
uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
// If store is legal, just bitcast the src pointer.
@@ -432,10 +564,28 @@ static void CreateCoercedStore(llvm::Value *Src,
/***/
-bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
+bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
return FI.getReturnInfo().isIndirect();
}
+bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
+ if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
+ default:
+ return false;
+ case BuiltinType::Float:
+ return getContext().Target.useObjCFPRetForRealType(TargetInfo::Float);
+ case BuiltinType::Double:
+ return getContext().Target.useObjCFPRetForRealType(TargetInfo::Double);
+ case BuiltinType::LongDouble:
+ return getContext().Target.useObjCFPRetForRealType(
+ TargetInfo::LongDouble);
+ }
+ }
+
+ return false;
+}
+
const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
const CGFunctionInfo &FI = getFunctionInfo(GD);
@@ -445,11 +595,12 @@ const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
Variadic = FPT->isVariadic();
- return GetFunctionType(FI, Variadic);
+ return GetFunctionType(FI, Variadic, false);
}
const llvm::FunctionType *
-CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
+CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
+ bool IsRecursive) {
std::vector<const llvm::Type*> ArgTys;
const llvm::Type *ResultType = 0;
@@ -462,13 +613,13 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
case ABIArgInfo::Extend:
case ABIArgInfo::Direct:
- ResultType = ConvertType(RetTy);
+ ResultType = ConvertType(RetTy, IsRecursive);
break;
case ABIArgInfo::Indirect: {
assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
ResultType = llvm::Type::getVoidTy(getLLVMContext());
- const llvm::Type *STy = ConvertType(RetTy);
+ const llvm::Type *STy = ConvertType(RetTy, IsRecursive);
ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
break;
}
@@ -490,24 +641,34 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
case ABIArgInfo::Ignore:
break;
- case ABIArgInfo::Coerce:
- ArgTys.push_back(AI.getCoerceToType());
+ case ABIArgInfo::Coerce: {
+ // If the coerce-to type is a first class aggregate, flatten it. Either
+ // way is semantically identical, but fast-isel and the optimizer
+      // generally like scalar values better than FCAs.
+ const llvm::Type *ArgTy = AI.getCoerceToType();
+ if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgTy)) {
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
+ ArgTys.push_back(STy->getElementType(i));
+ } else {
+ ArgTys.push_back(ArgTy);
+ }
break;
+ }
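
Concretely, if the ABI coerces an argument to the pair type { i32, i32 }, the
lowered LLVM function now takes two i32 parameters instead of one aggregate;
the prolog and call-site changes below split and reassemble the pair to
match. A source-level picture (signatures are illustrative):

    // A struct an ABI might coerce to { i32, i32 }:
    struct Pair { int a, b; };
    // Shape of the flattened signature -- two scalars, no aggregate:
    static int sumFlattened(int a, int b) { return a + b; }
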
case ABIArgInfo::Indirect: {
// indirect arguments are always on the stack, which is addr space #0.
- const llvm::Type *LTy = ConvertTypeForMem(it->type);
+ const llvm::Type *LTy = ConvertTypeForMem(it->type, IsRecursive);
ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
break;
}
case ABIArgInfo::Extend:
case ABIArgInfo::Direct:
- ArgTys.push_back(ConvertType(it->type));
+ ArgTys.push_back(ConvertType(it->type, IsRecursive));
break;
case ABIArgInfo::Expand:
- GetExpandedTypes(it->type, ArgTys);
+ GetExpandedTypes(it->type, ArgTys, IsRecursive);
break;
}
}
@@ -515,28 +676,12 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}
-static bool HasIncompleteReturnTypeOrArgumentTypes(const FunctionProtoType *T) {
- if (const TagType *TT = T->getResultType()->getAs<TagType>()) {
- if (!TT->getDecl()->isDefinition())
- return true;
- }
-
- for (unsigned i = 0, e = T->getNumArgs(); i != e; ++i) {
- if (const TagType *TT = T->getArgType(i)->getAs<TagType>()) {
- if (!TT->getDecl()->isDefinition())
- return true;
- }
- }
-
- return false;
-}
-
const llvm::Type *
CodeGenTypes::GetFunctionTypeForVTable(const CXXMethodDecl *MD) {
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- if (!HasIncompleteReturnTypeOrArgumentTypes(FPT))
- return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic());
+ if (!VerifyFuncTypeComplete(FPT))
+ return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic(), false);
return llvm::OpaqueType::get(getLLVMContext());
}
@@ -557,6 +702,12 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
if (TargetDecl) {
if (TargetDecl->hasAttr<NoThrowAttr>())
FuncAttrs |= llvm::Attribute::NoUnwind;
+ else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
+ const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
+ if (FPT && FPT->hasEmptyExceptionSpec())
+ FuncAttrs |= llvm::Attribute::NoUnwind;
+ }
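
With this addition, an empty dynamic exception specification now produces the
nounwind attribute just as NoThrowAttr does; for example (sketch):

    // Both declarations should now be emitted as LLVM 'nounwind':
    void f() __attribute__((nothrow));
    void g() throw();  // empty exception specification
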
+
if (TargetDecl->hasAttr<NoReturnAttr>())
FuncAttrs |= llvm::Attribute::NoReturn;
if (TargetDecl->hasAttr<ConstAttr>())
@@ -626,7 +777,12 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
switch (AI.getKind()) {
case ABIArgInfo::Coerce:
- break;
+ if (const llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(AI.getCoerceToType()))
+ Index += STy->getNumElements();
+ else
+ ++Index;
+ continue; // Skip index increment.
case ABIArgInfo::Indirect:
if (AI.getIndirectByVal())
@@ -666,7 +822,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
// FIXME: This is rather inefficient. Do we ever actually need to do
// anything here? The result should be just reconstructed on the other
// side, so extension should be a non-issue.
- getTypes().GetExpandedTypes(ParamType, Tys);
+ getTypes().GetExpandedTypes(ParamType, Tys, false);
Index += Tys.size();
continue;
}
@@ -687,7 +843,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// initialize the return value. TODO: it might be nice to have
// a more general mechanism for this that didn't require synthesized
// return statements.
- if (const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
if (FD->hasImplicitReturnZero()) {
QualType RetTy = FD->getResultType().getUnqualifiedType();
const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
@@ -703,7 +859,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::Function::arg_iterator AI = Fn->arg_begin();
// Name the struct return argument.
- if (CGM.ReturnTypeUsesSret(FI)) {
+ if (CGM.ReturnTypeUsesSRet(FI)) {
AI->setName("agg.result");
++AI;
}
@@ -719,7 +875,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
switch (ArgI.getKind()) {
case ABIArgInfo::Indirect: {
- llvm::Value* V = AI;
+ llvm::Value *V = AI;
if (hasAggregateLLVMType(Ty)) {
// Do nothing, aggregates and complex variables are accessed by
// reference.
@@ -739,7 +895,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
assert(AI != Fn->arg_end() && "Argument mismatch!");
- llvm::Value* V = AI;
+ llvm::Value *V = AI;
if (hasAggregateLLVMType(Ty)) {
// Create a temporary alloca to hold the argument; the rest of
// codegen expects to access aggregates & complex values by
@@ -789,12 +945,35 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
continue;
case ABIArgInfo::Coerce: {
- assert(AI != Fn->arg_end() && "Argument mismatch!");
// FIXME: This is very wasteful; EmitParmDecl is just going to drop the
// result in a new alloca anyway, so we could just store into that
// directly if we broke the abstraction down more.
- llvm::Value *V = CreateMemTemp(Ty, "coerce");
- CreateCoercedStore(AI, V, /*DestIsVolatile=*/false, *this);
+ llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");
+ Alloca->setAlignment(getContext().getDeclAlign(Arg).getQuantity());
+ llvm::Value *V = Alloca;
+
+ // If the coerce-to type is a first class aggregate, we flatten it and
+ // pass the elements. Either way is semantically identical, but fast-isel
+      // and the optimizer generally like scalar values better than FCAs.
+ if (const llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
+ llvm::Value *Ptr = V;
+ Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
+
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i));
+ llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
+ Builder.CreateStore(AI++, EltPtr);
+ }
+ } else {
+ // Simple case, just do a coerced store of the argument into the alloca.
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ AI->setName(Arg->getName() + ".coerce");
+ CreateCoercedStore(AI++, V, /*DestIsVolatile=*/false, *this);
+ }
+
// Match to what EmitParmDecl is expecting for this type.
if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
V = EmitLoadOfScalar(V, false, Ty);
@@ -805,7 +984,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
}
}
EmitParmDecl(*Arg, V);
- break;
+ continue; // Skip ++AI increment, already done.
}
}
@@ -814,52 +993,73 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
assert(AI == Fn->arg_end() && "Argument mismatch!");
}
-void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
- llvm::Value *ReturnValue) {
- llvm::Value *RV = 0;
-
+void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
// Functions with no result always return void.
- if (ReturnValue) {
- QualType RetTy = FI.getReturnType();
- const ABIArgInfo &RetAI = FI.getReturnInfo();
-
- switch (RetAI.getKind()) {
- case ABIArgInfo::Indirect:
- if (RetTy->isAnyComplexType()) {
- ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
- StoreComplexToAddr(RT, CurFn->arg_begin(), false);
- } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
- // Do nothing; aggregrates get evaluated directly into the destination.
- } else {
- EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
- false, RetTy);
- }
- break;
-
- case ABIArgInfo::Extend:
- case ABIArgInfo::Direct:
- // The internal return value temp always will have
- // pointer-to-return-type type.
- RV = Builder.CreateLoad(ReturnValue);
- break;
+ if (ReturnValue == 0) {
+ Builder.CreateRetVoid();
+ return;
+ }
- case ABIArgInfo::Ignore:
- break;
+ llvm::MDNode *RetDbgInfo = 0;
+ llvm::Value *RV = 0;
+ QualType RetTy = FI.getReturnType();
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
- case ABIArgInfo::Coerce:
- RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
- break;
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Indirect:
+ if (RetTy->isAnyComplexType()) {
+ ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
+ StoreComplexToAddr(RT, CurFn->arg_begin(), false);
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+      // Do nothing; aggregates get evaluated directly into the destination.
+ } else {
+ EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
+ false, RetTy);
+ }
+ break;
- case ABIArgInfo::Expand:
- assert(0 && "Invalid ABI kind for return argument");
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ // The internal return value temp always will have pointer-to-return-type
+ // type, just do a load.
+
+ // If the instruction right before the insertion point is a store to the
+ // return value, we can elide the load, zap the store, and usually zap the
+ // alloca.
+ llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
+ llvm::StoreInst *SI = 0;
+ if (InsertBB->empty() ||
+ !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
+ SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
+ RV = Builder.CreateLoad(ReturnValue);
+ } else {
+ // Get the stored value and nuke the now-dead store.
+ RetDbgInfo = SI->getDbgMetadata();
+ RV = SI->getValueOperand();
+ SI->eraseFromParent();
+
+ // If that was the only use of the return value, nuke it as well now.
+ if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
+ cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
+ ReturnValue = 0;
+ }
}
+ break;
}
+ case ABIArgInfo::Ignore:
+ break;
- if (RV) {
- Builder.CreateRet(RV);
- } else {
- Builder.CreateRetVoid();
+ case ABIArgInfo::Coerce:
+ RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
+ break;
+
+ case ABIArgInfo::Expand:
+ assert(0 && "Invalid ABI kind for return argument");
}
+
+ llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
+ if (RetDbgInfo)
+ Ret->setDbgMetadata(RetDbgInfo);
}
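
The Direct/Extend case above is a small peephole: when the current block ends
in a non-volatile store to the return slot, reuse the stored value, delete
the store, and drop the now-unused alloca. Modeled at the source level
(illustrative):

    // What the peephole removes from naive -O0 output:
    static int before(int x) { int retval = x; return retval; }  // store+load
    static int after(int x)  { return x; }                       // ret only
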
RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
@@ -894,11 +1094,29 @@ RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
if (ArgType->isReferenceType())
- return EmitReferenceBindingToExpr(E);
+ return EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
return EmitAnyExprToTemp(E);
}
+/// Emits a call or invoke instruction to the given function, depending
+/// on the current state of the EH stack.
+llvm::CallSite
+CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
+ llvm::Value * const *ArgBegin,
+ llvm::Value * const *ArgEnd,
+ const llvm::Twine &Name) {
+ llvm::BasicBlock *InvokeDest = getInvokeDest();
+ if (!InvokeDest)
+ return Builder.CreateCall(Callee, ArgBegin, ArgEnd, Name);
+
+ llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
+ llvm::InvokeInst *Invoke = Builder.CreateInvoke(Callee, ContBB, InvokeDest,
+ ArgBegin, ArgEnd, Name);
+ EmitBlock(ContBB);
+ return Invoke;
+}
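
Callers can funnel calls through this helper wherever an EH edge might be
needed; outside any cleanup scope it degrades to a plain call. A hypothetical
wrapper showing the intended use (names are assumptions):

    // Hypothetical two-argument convenience built on EmitCallOrInvoke.
    static llvm::CallSite emitCall2(CodeGenFunction &CGF, llvm::Value *Callee,
                                    llvm::Value *A0, llvm::Value *A1) {
      llvm::Value *Args[] = { A0, A1 };
      return CGF.EmitCallOrInvoke(Callee, Args, Args + 2, "call");
    }
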
+
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::Value *Callee,
ReturnValueSlot ReturnValue,
@@ -916,7 +1134,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// If the call returns a temporary with struct return, create a temporary
// alloca to hold the result, unless one is given to us.
- if (CGM.ReturnTypeUsesSret(CallInfo)) {
+ if (CGM.ReturnTypeUsesSRet(CallInfo)) {
llvm::Value *Value = ReturnValue.getValue();
if (!Value)
Value = CreateMemTemp(RetTy);
@@ -973,8 +1191,24 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
} else
SrcPtr = RV.getAggregateAddr();
- Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
- *this));
+
+ // If the coerce-to type is a first class aggregate, we flatten it and
+ // pass the elements. Either way is semantically identical, but fast-isel
+      // and the optimizer generally like scalar values better than FCAs.
+ if (const llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
+ SrcPtr = Builder.CreateBitCast(SrcPtr,
+ llvm::PointerType::getUnqual(STy));
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
+ Args.push_back(Builder.CreateLoad(EltPtr));
+ }
+ } else {
+ // In the simple case, just pass the coerced loaded value.
+ Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
+ *this));
+ }
+
break;
}
@@ -1014,15 +1248,18 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
- llvm::BasicBlock *InvokeDest = getInvokeDest();
unsigned CallingConv;
CodeGen::AttributeListType AttributeList;
CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
AttributeList.end());
+ llvm::BasicBlock *InvokeDest = 0;
+ if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
+ InvokeDest = getInvokeDest();
+
llvm::CallSite CS;
- if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
+ if (!InvokeDest) {
CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
} else {
llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
@@ -1030,9 +1267,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Args.data(), Args.data()+Args.size());
EmitBlock(Cont);
}
- if (callOrInvoke) {
+ if (callOrInvoke)
*callOrInvoke = CS.getInstruction();
- }
CS.setAttributes(Attrs);
CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h
index 31c8aac..41e707a 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h
@@ -83,11 +83,9 @@ namespace CodeGen {
typedef const ArgInfo *const_arg_iterator;
typedef ArgInfo *arg_iterator;
- CGFunctionInfo(unsigned CallingConvention,
- bool NoReturn,
- unsigned RegParm,
- CanQualType ResTy,
- const llvm::SmallVectorImpl<CanQualType> &ArgTys);
+ CGFunctionInfo(unsigned CallingConvention, bool NoReturn,
+ unsigned RegParm, CanQualType ResTy,
+ const CanQualType *ArgTys, unsigned NumArgTys);
~CGFunctionInfo() { delete[] Args; }
const_arg_iterator arg_begin() const { return Args + 1; }
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
index bebea54..c50fe90 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
@@ -340,9 +340,9 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
if (CGF.Exceptions && !BaseClassDecl->hasTrivialDestructor()) {
// FIXME: Is this OK for C++0x delegating constructors?
- CodeGenFunction::EHCleanupBlock Cleanup(CGF);
+ CodeGenFunction::CleanupBlock Cleanup(CGF, EHCleanup);
- CXXDestructorDecl *DD = BaseClassDecl->getDestructor(CGF.getContext());
+ CXXDestructorDecl *DD = BaseClassDecl->getDestructor();
CGF.EmitCXXDestructorCall(DD, Dtor_Base, isBaseVirtual, V);
}
}
@@ -354,7 +354,7 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
QualType T,
unsigned Index) {
if (Index == MemberInit->getNumArrayIndices()) {
- CodeGenFunction::CleanupScope Cleanups(CGF);
+ CodeGenFunction::RunCleanupsScope Cleanups(CGF);
llvm::Value *Dest = LHS.getAddress();
if (ArrayIndexVar) {
@@ -410,7 +410,7 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");
{
- CodeGenFunction::CleanupScope Cleanups(CGF);
+ CodeGenFunction::RunCleanupsScope Cleanups(CGF);
// Inside the loop body recurse to emit the inner loop or, eventually, the
// constructor call.
@@ -461,13 +461,12 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
// was implicitly generated, we shouldn't be zeroing memory.
RValue RHS;
if (FieldType->isReferenceType()) {
- RHS = CGF.EmitReferenceBindingToExpr(MemberInit->getInit(),
- /*IsInitializer=*/true);
+ RHS = CGF.EmitReferenceBindingToExpr(MemberInit->getInit(), Field);
CGF.EmitStoreThroughLValue(RHS, LHS, FieldType);
} else if (FieldType->isArrayType() && !MemberInit->getInit()) {
CGF.EmitNullInitialization(LHS.getAddress(), Field->getType());
} else if (!CGF.hasAggregateLLVMType(Field->getType())) {
- RHS = RValue::get(CGF.EmitScalarExpr(MemberInit->getInit(), true));
+ RHS = RValue::get(CGF.EmitScalarExpr(MemberInit->getInit()));
CGF.EmitStoreThroughLValue(RHS, LHS, FieldType);
} else if (MemberInit->getInit()->getType()->isAnyComplexType()) {
CGF.EmitComplexExprIntoAddr(MemberInit->getInit(), LHS.getAddress(),
@@ -535,12 +534,12 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
if (!RD->hasTrivialDestructor()) {
// FIXME: Is this OK for C++0x delegating constructors?
- CodeGenFunction::EHCleanupBlock Cleanup(CGF);
+ CodeGenFunction::CleanupBlock Cleanup(CGF, EHCleanup);
llvm::Value *ThisPtr = CGF.LoadCXXThis();
LValue LHS = CGF.EmitLValueForField(ThisPtr, Field, 0);
- CXXDestructorDecl *DD = RD->getDestructor(CGF.getContext());
+ CXXDestructorDecl *DD = RD->getDestructor();
CGF.EmitCXXDestructorCall(DD, Dtor_Complete, /*ForVirtualBase=*/false,
LHS.getAddress());
}
@@ -607,13 +606,11 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
// Enter the function-try-block before the constructor prologue if
// applicable.
- CXXTryStmtInfo TryInfo;
bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
-
if (IsTryBody)
- TryInfo = EnterCXXTryStmt(*cast<CXXTryStmt>(Body));
+ EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
- unsigned CleanupStackSize = CleanupEntries.size();
+ EHScopeStack::stable_iterator CleanupDepth = EHStack.stable_begin();
// Emit the constructor prologue, i.e. the base and member
// initializers.
@@ -629,10 +626,10 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
// initializers, which includes (along the exceptional path) the
// destructors for those members and bases that were fully
// constructed.
- EmitCleanupBlocks(CleanupStackSize);
+ PopCleanupBlocks(CleanupDepth);
if (IsTryBody)
- ExitCXXTryStmt(*cast<CXXTryStmt>(Body), TryInfo);
+ ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}
/// EmitCtorPrologue - This routine generates necessary code to initialize
@@ -649,9 +646,6 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
B != E; ++B) {
CXXBaseOrMemberInitializer *Member = (*B);
- assert(LiveTemporaries.empty() &&
- "Should not have any live temporaries at initializer start!");
-
if (Member->isBaseInitializer())
EmitBaseInitializer(*this, ClassDecl, Member, CtorType);
else
@@ -660,12 +654,8 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
InitializeVTablePointers(ClassDecl);
- for (unsigned I = 0, E = MemberInitializers.size(); I != E; ++I) {
- assert(LiveTemporaries.empty() &&
- "Should not have any live temporaries at initializer start!");
-
+ for (unsigned I = 0, E = MemberInitializers.size(); I != E; ++I)
EmitMemberInitializer(*this, ClassDecl, MemberInitializers[I], CD, Args);
- }
}
/// EmitDestructorBody - Emits the body of the current destructor.
@@ -679,14 +669,33 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
// anything else --- unless we're in a deleting destructor, in which
// case we're just going to call the complete destructor and then
// call operator delete() on the way out.
- CXXTryStmtInfo TryInfo;
bool isTryBody = (DtorType != Dtor_Deleting &&
Body && isa<CXXTryStmt>(Body));
if (isTryBody)
- TryInfo = EnterCXXTryStmt(*cast<CXXTryStmt>(Body));
+ EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
- llvm::BasicBlock *DtorEpilogue = createBasicBlock("dtor.epilogue");
- PushCleanupBlock(DtorEpilogue);
+ // Emit the destructor epilogue now. If this is a complete
+ // destructor with a function-try-block, perform the base epilogue
+ // as well.
+ //
+ // FIXME: This isn't really right, because an exception in the
+ // non-EH epilogue should jump to the appropriate place in the
+ // EH epilogue.
+ {
+ CleanupBlock Cleanup(*this, NormalCleanup);
+
+ if (isTryBody && DtorType == Dtor_Complete)
+ EmitDtorEpilogue(Dtor, Dtor_Base);
+ EmitDtorEpilogue(Dtor, DtorType);
+
+ if (Exceptions) {
+ Cleanup.beginEHCleanup();
+
+ if (isTryBody && DtorType == Dtor_Complete)
+ EmitDtorEpilogue(Dtor, Dtor_Base);
+ EmitDtorEpilogue(Dtor, DtorType);
+ }
+ }
bool SkipBody = false; // should get jump-threaded
@@ -725,27 +734,12 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
// nothing to do besides what's in the epilogue
}
- // Jump to the cleanup block.
- CleanupBlockInfo Info = PopCleanupBlock();
- assert(Info.CleanupBlock == DtorEpilogue && "Block mismatch!");
- EmitBlock(DtorEpilogue);
-
- // Emit the destructor epilogue now. If this is a complete
- // destructor with a function-try-block, perform the base epilogue
- // as well.
- if (isTryBody && DtorType == Dtor_Complete)
- EmitDtorEpilogue(Dtor, Dtor_Base);
- EmitDtorEpilogue(Dtor, DtorType);
-
- // Link up the cleanup information.
- if (Info.SwitchBlock)
- EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- EmitBlock(Info.EndBlock);
+ // We're done with the epilogue cleanup.
+ PopCleanupBlock();
// Exit the try if applicable.
if (isTryBody)
- ExitCXXTryStmt(*cast<CXXTryStmt>(Body), TryInfo);
+ ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}
/// EmitDtorEpilogue - Emit all code that comes at the end of class's
@@ -784,7 +778,7 @@ void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD,
// Ignore trivial destructors.
if (BaseClassDecl->hasTrivialDestructor())
continue;
- const CXXDestructorDecl *D = BaseClassDecl->getDestructor(getContext());
+ const CXXDestructorDecl *D = BaseClassDecl->getDestructor();
llvm::Value *V =
GetAddressOfDirectBaseInCompleteClass(LoadCXXThis(),
ClassDecl, BaseClassDecl,
@@ -839,10 +833,10 @@ void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD,
BasePtr = llvm::PointerType::getUnqual(BasePtr);
llvm::Value *BaseAddrPtr =
Builder.CreateBitCast(LHS.getAddress(), BasePtr);
- EmitCXXAggrDestructorCall(FieldClassDecl->getDestructor(getContext()),
+ EmitCXXAggrDestructorCall(FieldClassDecl->getDestructor(),
Array, BaseAddrPtr);
} else
- EmitCXXDestructorCall(FieldClassDecl->getDestructor(getContext()),
+ EmitCXXDestructorCall(FieldClassDecl->getDestructor(),
Dtor_Complete, /*ForVirtualBase=*/false,
LHS.getAddress());
}
@@ -863,7 +857,7 @@ void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD,
if (BaseClassDecl->hasTrivialDestructor())
continue;
- const CXXDestructorDecl *D = BaseClassDecl->getDestructor(getContext());
+ const CXXDestructorDecl *D = BaseClassDecl->getDestructor();
llvm::Value *V =
GetAddressOfDirectBaseInCompleteClass(LoadCXXThis(), ClassDecl,
BaseClassDecl,
@@ -940,7 +934,7 @@ CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
// Keep track of the current number of live temporaries.
{
- CXXTemporariesCleanupScope Scope(*this);
+ RunCleanupsScope Scope(*this);
EmitCXXConstructorCall(D, Ctor_Complete, /*ForVirtualBase=*/false, Address,
ArgBeg, ArgEnd);
@@ -1033,51 +1027,6 @@ CodeGenFunction::EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
EmitBlock(AfterFor, true);
}
-/// GenerateCXXAggrDestructorHelper - Generates a helper function which when
-/// invoked, calls the default destructor on array elements in reverse order of
-/// construction.
-llvm::Constant *
-CodeGenFunction::GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
- const ArrayType *Array,
- llvm::Value *This) {
- FunctionArgList Args;
- ImplicitParamDecl *Dst =
- ImplicitParamDecl::Create(getContext(), 0,
- SourceLocation(), 0,
- getContext().getPointerType(getContext().VoidTy));
- Args.push_back(std::make_pair(Dst, Dst->getType()));
-
- llvm::SmallString<16> Name;
- llvm::raw_svector_ostream(Name) << "__tcf_" << (++UniqueAggrDestructorCount);
- QualType R = getContext().VoidTy;
- const CGFunctionInfo &FI
- = CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
- const llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false);
- llvm::Function *Fn =
- llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
- Name.str(),
- &CGM.getModule());
- IdentifierInfo *II = &CGM.getContext().Idents.get(Name.str());
- FunctionDecl *FD = FunctionDecl::Create(getContext(),
- getContext().getTranslationUnitDecl(),
- SourceLocation(), II, R, 0,
- FunctionDecl::Static,
- FunctionDecl::None,
- false, true);
- StartFunction(FD, R, Fn, Args, SourceLocation());
- QualType BaseElementTy = getContext().getBaseElementType(Array);
- const llvm::Type *BasePtr = ConvertType(BaseElementTy);
- BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr = Builder.CreateBitCast(This, BasePtr);
- EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
- FinishFunction();
- llvm::Type *Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext),
- 0);
- llvm::Constant *m = llvm::ConstantExpr::getBitCast(Fn, Ptr8Ty);
- return m;
-}
-
-
void
CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
CXXCtorType Type, bool ForVirtualBase,
@@ -1160,6 +1109,23 @@ void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
EmitCXXMemberCall(DD, Callee, ReturnValueSlot(), This, VTT, 0, 0);
}
+void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
+ CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
+ if (!ClassDecl) return;
+ if (ClassDecl->hasTrivialDestructor()) return;
+
+ const CXXDestructorDecl *D = ClassDecl->getDestructor();
+
+ CleanupBlock Scope(*this, NormalCleanup);
+
+ EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false, Addr);
+
+ if (Exceptions) {
+ Scope.beginEHCleanup();
+ EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false, Addr);
+ }
+}
+
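// Editor's note: a usage sketch for the new PushDestructorCleanup entry
// point (hypothetical caller; assumes the surrounding CodeGenFunction
// helpers CreateTempAlloca/ConvertTypeForMem):
//
//   llvm::Value *Tmp = CGF.CreateTempAlloca(CGF.ConvertTypeForMem(T), "tmp");
//   // ... emit initialization of Tmp ...
//   CGF.PushDestructorCleanup(T, Tmp);  // no-op for trivial/non-class types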
llvm::Value *
CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
const CXXRecordDecl *ClassDecl,
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
index c9bcb1b..4e15895 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -21,7 +21,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/Version.h"
-#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
@@ -536,6 +536,19 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
Context.getPointerType(Context.getTagDeclType(Method->getParent()));
llvm::DIType ThisPtrType =
DebugFactory.CreateArtificialType(getOrCreateType(ThisPtr, Unit));
+
+ unsigned Quals = Method->getTypeQualifiers();
+ if (Quals & Qualifiers::Const)
+ ThisPtrType =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_const_type,
+ Unit, "", Unit,
+ 0, 0, 0, 0, 0, ThisPtrType);
+ if (Quals & Qualifiers::Volatile)
+ ThisPtrType =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_volatile_type,
+ Unit, "", Unit,
+ 0, 0, 0, 0, 0, ThisPtrType);
+
TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
Elts.push_back(ThisPtrType);
@@ -567,9 +580,9 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
// Since a single ctor/dtor corresponds to multiple functions, it doesn't
// make sense to give a single ctor/dtor a linkage name.
- MangleBuffer MethodLinkageName;
+ llvm::StringRef MethodLinkageName;
if (!IsCtorOrDtor)
- CGM.getMangledName(MethodLinkageName, Method);
+ MethodLinkageName = CGM.getMangledName(Method);
// Get the location for the method.
llvm::DIFile MethodDefUnit = getOrCreateFile(Method->getLocation());
@@ -598,7 +611,7 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
MethodLinkageName,
MethodDefUnit, MethodLine,
MethodTy, /*isLocalToUnit=*/false,
- Method->isThisDeclarationADefinition(),
+ /* isDefinition=*/ false,
Virtuality, VIndex, ContainingType);
// Don't cache ctors or dtors since we have to emit multiple functions for
@@ -758,22 +771,30 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
// its members. Finally, we create a descriptor for the complete type (which
// may refer to the forward decl if the struct is recursive) and replace all
// uses of the forward declaration with the final definition.
+ llvm::DIDescriptor FDContext =
+ getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()), Unit);
+
+ // If this is just a forward declaration, construct an appropriately
+ // marked node and just return it.
+ if (!RD->getDefinition()) {
+ llvm::DICompositeType FwdDecl =
+ DebugFactory.CreateCompositeType(Tag, FDContext, RD->getName(),
+ DefUnit, Line, 0, 0, 0,
+ llvm::DIType::FlagFwdDecl,
+ llvm::DIType(), llvm::DIArray());
+
+ return FwdDecl;
+ }
// RD->getName() is not unique. However, the debug info descriptors
// are uniqued, so use the type name to ensure uniqueness.
llvm::SmallString<128> FwdDeclName;
llvm::raw_svector_ostream(FwdDeclName) << "fwd.type." << FwdDeclCount++;
- llvm::DIDescriptor FDContext =
- getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()), Unit);
llvm::DICompositeType FwdDecl =
DebugFactory.CreateCompositeType(Tag, FDContext, FwdDeclName,
DefUnit, Line, 0, 0, 0, 0,
llvm::DIType(), llvm::DIArray());
- // If this is just a forward declaration, return it.
- if (!RD->getDefinition())
- return FwdDecl;
-
llvm::MDNode *MN = FwdDecl;
llvm::TrackingVH<llvm::MDNode> FwdDeclNode = MN;
// Otherwise, insert it into the TypeCache so that recursive uses will find
@@ -1289,7 +1310,7 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
CGBuilderTy &Builder) {
llvm::StringRef Name;
- MangleBuffer LinkageName;
+ llvm::StringRef LinkageName;
const Decl *D = GD.getDecl();
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
@@ -1307,11 +1328,11 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
}
Name = getFunctionName(FD);
// Use mangled name as linkage name for c/c++ functions.
- CGM.getMangledName(LinkageName, GD);
+ LinkageName = CGM.getMangledName(GD);
} else {
// Use llvm function name as linkage name.
Name = Fn->getName();
- LinkageName.setString(Name);
+ LinkageName = Name;
}
if (!Name.empty() && Name[0] == '\01')
Name = Name.substr(1);
@@ -1477,7 +1498,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
llvm::DIVariable D =
DebugFactory.CreateVariable(Tag, llvm::DIDescriptor(RegionStack.back()),
VD->getName(),
- Unit, Line, Ty);
+ Unit, Line, Ty, CGM.getLangOptions().Optimize);
// Insert an llvm.dbg.declare into the current block.
llvm::Instruction *Call =
DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock());
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
index 07edca0..1a62ea9 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
@@ -20,7 +20,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
#include "llvm/Target/TargetData.h"
@@ -38,7 +38,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::ClassTemplatePartialSpecialization:
case Decl::TemplateTypeParm:
case Decl::UnresolvedUsingValue:
- case Decl::NonTypeTemplateParm:
+ case Decl::NonTypeTemplateParm:
case Decl::CXXMethod:
case Decl::CXXConstructor:
case Decl::CXXDestructor:
@@ -59,6 +59,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::ObjCImplementation:
case Decl::ObjCProperty:
case Decl::ObjCCompatibleAlias:
+ case Decl::AccessSpec:
case Decl::LinkageSpec:
case Decl::ObjCPropertyImpl:
case Decl::ObjCClass:
@@ -138,16 +139,14 @@ static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
const char *Separator) {
CodeGenModule &CGM = CGF.CGM;
if (CGF.getContext().getLangOptions().CPlusPlus) {
- MangleBuffer Name;
- CGM.getMangledName(Name, &D);
- return Name.getString().str();
+ llvm::StringRef Name = CGM.getMangledName(&D);
+ return Name.str();
}
std::string ContextName;
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CGF.CurFuncDecl)) {
- MangleBuffer Name;
- CGM.getMangledName(Name, FD);
- ContextName = Name.getString().str();
+ llvm::StringRef Name = CGM.getMangledName(FD);
+ ContextName = Name.str();
} else if (isa<ObjCMethodDecl>(CGF.CurFuncDecl))
ContextName = CGF.CurFn->getName();
else
@@ -328,10 +327,10 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
Types.push_back(llvm::PointerType::getUnqual(ByRefTypeHolder));
// int32_t __flags;
- Types.push_back(llvm::Type::getInt32Ty(VMContext));
+ Types.push_back(Int32Ty);
// int32_t __size;
- Types.push_back(llvm::Type::getInt32Ty(VMContext));
+ Types.push_back(Int32Ty);
bool HasCopyAndDispose = BlockRequiresCopying(Ty);
if (HasCopyAndDispose) {
@@ -389,10 +388,63 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
return Info.first;
}
+namespace {
+ struct CallArrayDtor : EHScopeStack::LazyCleanup {
+ CallArrayDtor(const CXXDestructorDecl *Dtor,
+ const ConstantArrayType *Type,
+ llvm::Value *Loc)
+ : Dtor(Dtor), Type(Type), Loc(Loc) {}
+
+ const CXXDestructorDecl *Dtor;
+ const ConstantArrayType *Type;
+ llvm::Value *Loc;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ QualType BaseElementTy = CGF.getContext().getBaseElementType(Type);
+ const llvm::Type *BasePtr = CGF.ConvertType(BaseElementTy);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr = CGF.Builder.CreateBitCast(Loc, BasePtr);
+ CGF.EmitCXXAggrDestructorCall(Dtor, Type, BaseAddrPtr);
+ }
+ };
+
+ struct CallVarDtor : EHScopeStack::LazyCleanup {
+ CallVarDtor(const CXXDestructorDecl *Dtor,
+ llvm::Value *NRVOFlag,
+ llvm::Value *Loc)
+ : Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(Loc) {}
+
+ const CXXDestructorDecl *Dtor;
+ llvm::Value *NRVOFlag;
+ llvm::Value *Loc;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ // Along the exceptions path we always execute the dtor.
+ bool NRVO = !IsForEH && NRVOFlag;
+
+ llvm::BasicBlock *SkipDtorBB = 0;
+ if (NRVO) {
+ // If we exited via NRVO, we skip the destructor call.
+ llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
+ SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
+ llvm::Value *DidNRVO = CGF.Builder.CreateLoad(NRVOFlag, "nrvo.val");
+ CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
+ CGF.EmitBlock(RunDtorBB);
+ }
+
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false, Loc);
+
+ if (NRVO) CGF.EmitBlock(SkipDtorBB);
+ }
+ };
+}
+
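// Editor's note: the LazyCleanup pattern in one illustrative sketch. A
// cleanup object is placement-new'ed into the EHScopeStack, and Emit() can
// run twice -- once on the normal exit path, once on the EH path -- with
// IsForEH telling them apart (CallFoo and getFooFn are hypothetical):
//
//   struct CallFoo : EHScopeStack::LazyCleanup {
//     llvm::Value *Addr;
//     CallFoo(llvm::Value *Addr) : Addr(Addr) {}
//     void Emit(CodeGenFunction &CGF, bool IsForEH) {
//       CGF.Builder.CreateCall(getFooFn(CGF), Addr);
//     }
//   };
//   EHStack.pushLazyCleanup<CallFoo>(NormalAndEHCleanup, Addr);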
/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
-void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
+void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
+ SpecialInitFn *SpecialInit) {
QualType Ty = D.getType();
bool isByRef = D.hasAttr<BlocksAttr>();
bool needsDispose = false;
@@ -490,7 +542,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
{
// Push a cleanup block and restore the stack there.
- DelayedCleanupBlock scope(*this);
+ CleanupBlock scope(*this, NormalCleanup);
V = Builder.CreateLoad(Stack, "tmp");
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
@@ -505,10 +557,6 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
llvm::Value *VLASize = EmitVLASize(Ty);
- // Downcast the VLA size expression
- VLASize = Builder.CreateIntCast(VLASize, llvm::Type::getInt32Ty(VMContext),
- false, "tmp");
-
// Allocate memory for the array.
llvm::AllocaInst *VLA =
Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla");
@@ -573,18 +621,18 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
int isa = 0;
if (flag&BLOCK_FIELD_IS_WEAK)
isa = 1;
- V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), isa);
+ V = llvm::ConstantInt::get(Int32Ty, isa);
V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
Builder.CreateStore(V, isa_field);
Builder.CreateStore(DeclPtr, forwarding_field);
- V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags);
+ V = llvm::ConstantInt::get(Int32Ty, flags);
Builder.CreateStore(V, flags_field);
const llvm::Type *V1;
V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
- V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ V = llvm::ConstantInt::get(Int32Ty,
CGM.GetTargetTypeStoreSize(V1).getQuantity());
Builder.CreateStore(V, size_field);
@@ -602,7 +650,9 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
}
}
- if (Init) {
+ if (SpecialInit) {
+ SpecialInit(*this, D, DeclPtr);
+ } else if (Init) {
llvm::Value *Loc = DeclPtr;
if (isByRef)
Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
@@ -618,8 +668,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
assert(Init != 0 && "Wasn't a simple constant init?");
llvm::Value *AlignVal =
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- Align.getQuantity());
+ llvm::ConstantInt::get(Int32Ty, Align.getQuantity());
const llvm::Type *IntPtr =
llvm::IntegerType::get(VMContext, LLVMPointerWidth);
llvm::Value *SizeVal =
@@ -658,7 +707,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
Loc, SrcPtr, SizeVal, AlignVal, NotVolatile);
}
} else if (Ty->isReferenceType()) {
- RValue RV = EmitReferenceBindingToExpr(Init, /*IsInitializer=*/true);
+ RValue RV = EmitReferenceBindingToExpr(Init, &D);
EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
} else if (!hasAggregateLLVMType(Init->getType())) {
llvm::Value *V = EmitScalarExpr(Init);
@@ -669,7 +718,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
EmitAggExpr(Init, Loc, isVolatile);
}
}
-
+
// Handle CXX destruction of variables.
QualType DtorTy(Ty);
while (const ArrayType *Array = getContext().getAsArrayType(DtorTy))
@@ -684,60 +733,16 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
D.getNameAsString());
- const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext());
+ const CXXDestructorDecl *D = ClassDecl->getDestructor();
assert(D && "EmitLocalBlockVarDecl - destructor is null");
if (const ConstantArrayType *Array =
getContext().getAsConstantArrayType(Ty)) {
- {
- DelayedCleanupBlock Scope(*this);
- QualType BaseElementTy = getContext().getBaseElementType(Array);
- const llvm::Type *BasePtr = ConvertType(BaseElementTy);
- BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr =
- Builder.CreateBitCast(Loc, BasePtr);
- EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
-
- // Make sure to jump to the exit block.
- EmitBranch(Scope.getCleanupExitBlock());
- }
- if (Exceptions) {
- EHCleanupBlock Cleanup(*this);
- QualType BaseElementTy = getContext().getBaseElementType(Array);
- const llvm::Type *BasePtr = ConvertType(BaseElementTy);
- BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr =
- Builder.CreateBitCast(Loc, BasePtr);
- EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
- }
+ EHStack.pushLazyCleanup<CallArrayDtor>(NormalAndEHCleanup,
+ D, Array, Loc);
} else {
- {
- // Normal destruction.
- DelayedCleanupBlock Scope(*this);
-
- if (NRVO) {
- // If we exited via NRVO, we skip the destructor call.
- llvm::BasicBlock *NoNRVO = createBasicBlock("nrvo.unused");
- Builder.CreateCondBr(Builder.CreateLoad(NRVOFlag, "nrvo.val"),
- Scope.getCleanupExitBlock(),
- NoNRVO);
- EmitBlock(NoNRVO);
- }
-
- // We don't call the destructor along the normal edge if we're
- // applying the NRVO.
- EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false,
- Loc);
-
- // Make sure to jump to the exit block.
- EmitBranch(Scope.getCleanupExitBlock());
- }
-
- if (Exceptions) {
- EHCleanupBlock Cleanup(*this);
- EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false,
- Loc);
- }
+ EHStack.pushLazyCleanup<CallVarDtor>(NormalAndEHCleanup,
+ D, NRVOFlag, Loc);
}
}
}
@@ -758,17 +763,19 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
//
// To fix this we insert a bitcast here.
QualType ArgTy = Info.arg_begin()->type;
- {
- DelayedCleanupBlock scope(*this);
- CallArgList Args;
- Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
- ConvertType(ArgTy))),
- getContext().getPointerType(D.getType())));
- EmitCall(Info, F, ReturnValueSlot(), Args);
- }
+ CleanupBlock CleanupScope(*this, NormalCleanup);
+
+ // Normal cleanup.
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
+ ConvertType(ArgTy))),
+ getContext().getPointerType(D.getType())));
+ EmitCall(Info, F, ReturnValueSlot(), Args);
+
+ // EH cleanup.
if (Exceptions) {
- EHCleanupBlock Cleanup(*this);
+ CleanupScope.beginEHCleanup();
CallArgList Args;
Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
@@ -779,15 +786,16 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
}
if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
- {
- DelayedCleanupBlock scope(*this);
- llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
- V = Builder.CreateLoad(V);
- BuildBlockRelease(V);
- }
+ CleanupBlock CleanupScope(*this, NormalCleanup);
+
+ llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
+ V = Builder.CreateLoad(V);
+ BuildBlockRelease(V);
+
// FIXME: Turn this on and audit the codegen
if (0 && Exceptions) {
- EHCleanupBlock Cleanup(*this);
+ CleanupScope.beginEHCleanup();
+
llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
V = Builder.CreateLoad(V);
BuildBlockRelease(V);
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
index f94ddd9..ec3f386 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
-#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
using namespace clang;
@@ -66,16 +66,15 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
if (RD->hasTrivialDestructor())
return;
- CXXDestructorDecl *Dtor = RD->getDestructor(Context);
+ CXXDestructorDecl *Dtor = RD->getDestructor();
llvm::Constant *DtorFn;
if (Array) {
DtorFn =
- CodeGenFunction(CGM).GenerateCXXAggrDestructorHelper(Dtor,
- Array,
- DeclPtr);
+ CodeGenFunction(CGM).GenerateCXXAggrDestructorHelper(Dtor, Array,
+ DeclPtr);
const llvm::Type *Int8PtrTy =
- llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
DeclPtr = llvm::Constant::getNullValue(Int8PtrTy);
} else
DtorFn = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete);
@@ -94,13 +93,9 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
EmitDeclDestroy(*this, D, DeclPtr);
return;
}
- if (Init->isLvalue(getContext()) == Expr::LV_Valid) {
- RValue RV = EmitReferenceBindingToExpr(Init, /*IsInitializer=*/true);
- EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, T);
- return;
- }
- ErrorUnsupported(Init,
- "global variable that binds reference to a non-lvalue");
+
+ RValue RV = EmitReferenceBindingToExpr(Init, &D);
+ EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, T);
}
void
@@ -144,6 +139,25 @@ CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
Builder.CreateCall(AtExitFn, &Args[0], llvm::array_endof(Args));
}
+static llvm::Function *
+CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
+ const llvm::FunctionType *FTy,
+ llvm::StringRef Name) {
+ llvm::Function *Fn =
+ llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
+ Name, &CGM.getModule());
+
+ // Set the section if needed.
+ if (const char *Section =
+ CGM.getContext().Target.getStaticInitSectionSpecifier())
+ Fn->setSection(Section);
+
+ if (!CGM.getLangOptions().Exceptions)
+ Fn->setDoesNotThrow();
+
+ return Fn;
+}
+
void
CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D) {
const llvm::FunctionType *FTy
@@ -152,17 +166,22 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D) {
// Create a variable initialization function.
llvm::Function *Fn =
- llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
- "__cxx_global_var_init", &TheModule);
+ CreateGlobalInitOrDestructFunction(*this, FTy, "__cxx_global_var_init");
CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D);
- CXXGlobalInits.push_back(Fn);
+ if (D->hasAttr<InitPriorityAttr>()) {
+ unsigned int order = D->getAttr<InitPriorityAttr>()->getPriority();
+ OrderGlobalInits Key(order, PrioritizedCXXGlobalInits.size());
+ PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
+ }
+ else
+ CXXGlobalInits.push_back(Fn);
}
void
CodeGenModule::EmitCXXGlobalInitFunc() {
- if (CXXGlobalInits.empty())
+ if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
return;
const llvm::FunctionType *FTy
@@ -170,21 +189,30 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
false);
// Create our global initialization function.
- llvm::Function *Fn =
- llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
- "_GLOBAL__I_a", &TheModule);
-
- CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
- &CXXGlobalInits[0],
- CXXGlobalInits.size());
+ llvm::Function *Fn =
+ CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__I_a");
+
+ if (!PrioritizedCXXGlobalInits.empty()) {
+ llvm::SmallVector<llvm::Constant*, 8> LocalCXXGlobalInits;
+ llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
+ PrioritizedCXXGlobalInits.end());
+ for (unsigned i = 0; i < PrioritizedCXXGlobalInits.size(); i++) {
+ llvm::Function *Fn = PrioritizedCXXGlobalInits[i].second;
+ LocalCXXGlobalInits.push_back(Fn);
+ }
+ for (unsigned i = 0; i < CXXGlobalInits.size(); i++)
+ LocalCXXGlobalInits.push_back(CXXGlobalInits[i]);
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
+ &LocalCXXGlobalInits[0],
+ LocalCXXGlobalInits.size());
+ }
+ else
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
+ &CXXGlobalInits[0],
+ CXXGlobalInits.size());
AddGlobalCtor(Fn);
}
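// Editor's note: a source-level sketch of what the prioritized path orders
// (attribute values are illustrative; lower priorities run earlier):
//
//   struct S { S(); };
//   __attribute__((init_priority(200))) S a;  // initialized first
//   __attribute__((init_priority(300))) S b;  // then this one
//   S c;                                      // unprioritized, after both
//
// OrderGlobalInits sorts by priority and then by emission order, so equal
// priorities keep their source order.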
-void CodeGenModule::AddCXXDtorEntry(llvm::Constant *DtorFn,
- llvm::Constant *Object) {
- CXXGlobalDtors.push_back(std::make_pair(DtorFn, Object));
-}
-
void CodeGenModule::EmitCXXGlobalDtorFunc() {
if (CXXGlobalDtors.empty())
return;
@@ -195,8 +223,7 @@ void CodeGenModule::EmitCXXGlobalDtorFunc() {
// Create our global destructor function.
llvm::Function *Fn =
- llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
- "_GLOBAL__D_a", &TheModule);
+ CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__D_a");
CodeGenFunction(*this).GenerateCXXGlobalDtorFunc(Fn, CXXGlobalDtors);
AddGlobalDtor(Fn);
@@ -226,14 +253,14 @@ void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
}
void CodeGenFunction::GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
- const std::vector<std::pair<llvm::Constant*, llvm::Constant*> >
+ const std::vector<std::pair<llvm::WeakVH, llvm::Constant*> >
&DtorsAndObjects) {
StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FunctionArgList(),
SourceLocation());
// Emit the dtors, in reverse order from construction.
for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) {
- llvm::Constant *Callee = DtorsAndObjects[e - i - 1].first;
+ llvm::Value *Callee = DtorsAndObjects[e - i - 1].first;
llvm::CallInst *CI = Builder.CreateCall(Callee,
DtorsAndObjects[e - i - 1].second);
// Make sure the call and the callee agree on calling convention.
@@ -301,7 +328,6 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
CGM.getMangleContext().mangleGuardVariable(&D, GuardVName);
// Create the guard variable.
- const llvm::Type *Int64Ty = llvm::Type::getInt64Ty(VMContext);
llvm::GlobalValue *GuardVariable =
new llvm::GlobalVariable(CGM.getModule(), Int64Ty,
false, GV->getLinkage(),
@@ -324,8 +350,6 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
EmitBlock(InitCheckBlock);
// Variables used when coping with thread-safe statics and exceptions.
- llvm::BasicBlock *SavedLandingPad = 0;
- llvm::BasicBlock *LandingPad = 0;
if (ThreadsafeStatics) {
// Call __cxa_guard_acquire.
V = Builder.CreateCall(getGuardAcquireFn(*this), GuardVariable);
@@ -335,10 +359,10 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
InitBlock, EndBlock);
+ // Call __cxa_guard_abort along the exceptional edge.
if (Exceptions) {
- SavedLandingPad = getInvokeDest();
- LandingPad = createBasicBlock("guard.lpad");
- setInvokeDest(LandingPad);
+ CleanupBlock Cleanup(*this, EHCleanup);
+ Builder.CreateCall(getGuardAbortFn(*this), GuardVariable);
}
EmitBlock(InitBlock);
@@ -346,17 +370,14 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
if (D.getType()->isReferenceType()) {
QualType T = D.getType();
- // We don't want to pass true for IsInitializer here, because a static
- // reference to a temporary does not extend its lifetime.
- RValue RV = EmitReferenceBindingToExpr(D.getInit(),
- /*IsInitializer=*/false);
+ RValue RV = EmitReferenceBindingToExpr(D.getInit(), &D);
EmitStoreOfScalar(RV.getScalarVal(), GV, /*Volatile=*/false, T);
} else
EmitDeclInit(*this, D, GV);
if (ThreadsafeStatics) {
- // Call __cxa_guard_release.
+ // Call __cxa_guard_release. This cannot throw.
Builder.CreateCall(getGuardReleaseFn(*this), GuardVariable);
} else {
llvm::Value *One =
@@ -368,57 +389,39 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
if (!D.getType()->isReferenceType())
EmitDeclDestroy(*this, D, GV);
- if (ThreadsafeStatics && Exceptions) {
- // If an exception is thrown during initialization, call __cxa_guard_abort
- // along the exceptional edge.
- EmitBranch(EndBlock);
-
- // Construct the landing pad.
- EmitBlock(LandingPad);
-
- // Personality function and LLVM intrinsics.
- llvm::Constant *Personality =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty
- (VMContext),
- true),
- "__gxx_personality_v0");
- Personality = llvm::ConstantExpr::getBitCast(Personality, PtrToInt8Ty);
- llvm::Value *llvm_eh_exception =
- CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *llvm_eh_selector =
- CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
-
- // Exception object
- llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
- llvm::Value *RethrowPtr = CreateTempAlloca(Exc->getType(), "_rethrow");
-
- // Call the selector function.
- const llvm::PointerType *PtrToInt8Ty
- = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
- llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty);
- llvm::Value* SelectorArgs[3] = { Exc, Personality, Null };
- Builder.CreateCall(llvm_eh_selector, SelectorArgs, SelectorArgs + 3,
- "selector");
- Builder.CreateStore(Exc, RethrowPtr);
-
- // Call __cxa_guard_abort along the exceptional edge.
- Builder.CreateCall(getGuardAbortFn(*this), GuardVariable);
-
- setInvokeDest(SavedLandingPad);
-
- // Rethrow the current exception.
- if (getInvokeDest()) {
- llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- Builder.CreateInvoke(getUnwindResumeOrRethrowFn(), Cont,
- getInvokeDest(),
- Builder.CreateLoad(RethrowPtr));
- EmitBlock(Cont);
- } else
- Builder.CreateCall(getUnwindResumeOrRethrowFn(),
- Builder.CreateLoad(RethrowPtr));
-
- Builder.CreateUnreachable();
- }
-
EmitBlock(EndBlock);
}
+
+/// GenerateCXXAggrDestructorHelper - Generates a helper function which,
+/// when invoked, calls the destructor on array elements in reverse order
+/// of construction.
+llvm::Function *
+CodeGenFunction::GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
+ const ArrayType *Array,
+ llvm::Value *This) {
+ FunctionArgList Args;
+ ImplicitParamDecl *Dst =
+ ImplicitParamDecl::Create(getContext(), 0,
+ SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+ Args.push_back(std::make_pair(Dst, Dst->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(getContext().VoidTy, Args,
+ FunctionType::ExtInfo());
+ const llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false);
+ llvm::Function *Fn =
+ CreateGlobalInitOrDestructFunction(CGM, FTy, "__cxx_global_array_dtor");
+
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn, Args, SourceLocation());
+
+ QualType BaseElementTy = getContext().getBaseElementType(Array);
+ const llvm::Type *BasePtr = ConvertType(BaseElementTy)->getPointerTo();
+ llvm::Value *BaseAddrPtr = Builder.CreateBitCast(This, BasePtr);
+
+ EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
+
+ FinishFunction();
+
+ return Fn;
+}
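// Editor's note: registration sketch (cf. EmitDeclDestroy above) -- the
// helper closes over the concrete array address, so the object pointer
// handed to the atexit mechanism is just a null i8* and is ignored:
//
//   llvm::Constant *DtorFn =
//     CodeGenFunction(CGM).GenerateCXXAggrDestructorHelper(Dtor, Array,
//                                                          DeclPtr);
//   CGF.EmitCXXGlobalDtorRegistration(DtorFn,
//       llvm::Constant::getNullValue(Int8PtrTy));  // argument unused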
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
index ddc1c77..4980aad 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
@@ -14,11 +14,194 @@
#include "clang/AST/StmtCXX.h"
#include "llvm/Intrinsics.h"
+#include "llvm/Support/CallSite.h"
#include "CodeGenFunction.h"
+#include "CGException.h"
+
using namespace clang;
using namespace CodeGen;
+/// Push an entry of the given size onto this protected-scope stack.
+char *EHScopeStack::allocate(size_t Size) {
+ if (!StartOfBuffer) {
+ unsigned Capacity = 1024;
+ while (Capacity < Size) Capacity *= 2;
+ StartOfBuffer = new char[Capacity];
+ StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
+ } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
+ unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer;
+ unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer);
+
+ unsigned NewCapacity = CurrentCapacity;
+ do {
+ NewCapacity *= 2;
+ } while (NewCapacity < UsedCapacity + Size);
+
+ char *NewStartOfBuffer = new char[NewCapacity];
+ char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity;
+ char *NewStartOfData = NewEndOfBuffer - UsedCapacity;
+ memcpy(NewStartOfData, StartOfData, UsedCapacity);
+ delete [] StartOfBuffer;
+ StartOfBuffer = NewStartOfBuffer;
+ EndOfBuffer = NewEndOfBuffer;
+ StartOfData = NewStartOfData;
+ }
+
+ assert(StartOfBuffer + Size <= StartOfData);
+ StartOfData -= Size;
+ return StartOfData;
+}
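// Editor's note: a worked example of the downward-growing buffer (sizes
// illustrative). With a fresh 1024-byte buffer, pushing a 64-byte scope
// moves StartOfData from offset 1024 to 960; pushing another moves it to
// 896. begin() therefore always points at the innermost scope, and popping
// a scope is just "StartOfData += size".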
+
+EHScopeStack::stable_iterator
+EHScopeStack::getEnclosingEHCleanup(iterator it) const {
+ assert(it != end());
+ do {
+ if (isa<EHCleanupScope>(*it)) {
+ if (cast<EHCleanupScope>(*it).isEHCleanup())
+ return stabilize(it);
+ return cast<EHCleanupScope>(*it).getEnclosingEHCleanup();
+ }
+ if (isa<EHLazyCleanupScope>(*it)) {
+ if (cast<EHLazyCleanupScope>(*it).isEHCleanup())
+ return stabilize(it);
+ return cast<EHLazyCleanupScope>(*it).getEnclosingEHCleanup();
+ }
+ ++it;
+ } while (it != end());
+ return stable_end();
+}
+
+
+void *EHScopeStack::pushLazyCleanup(CleanupKind Kind, size_t Size) {
+ assert(((Size % sizeof(void*)) == 0) && "cleanup type is misaligned");
+ char *Buffer = allocate(EHLazyCleanupScope::getSizeForCleanupSize(Size));
+ bool IsNormalCleanup = Kind != EHCleanup;
+ bool IsEHCleanup = Kind != NormalCleanup;
+ EHLazyCleanupScope *Scope =
+ new (Buffer) EHLazyCleanupScope(IsNormalCleanup,
+ IsEHCleanup,
+ Size,
+ BranchFixups.size(),
+ InnermostNormalCleanup,
+ InnermostEHCleanup);
+ if (IsNormalCleanup)
+ InnermostNormalCleanup = stable_begin();
+ if (IsEHCleanup)
+ InnermostEHCleanup = stable_begin();
+
+ return Scope->getCleanupBuffer();
+}
+
+void EHScopeStack::pushCleanup(llvm::BasicBlock *NormalEntry,
+ llvm::BasicBlock *NormalExit,
+ llvm::BasicBlock *EHEntry,
+ llvm::BasicBlock *EHExit) {
+ char *Buffer = allocate(EHCleanupScope::getSize());
+ new (Buffer) EHCleanupScope(BranchFixups.size(),
+ InnermostNormalCleanup,
+ InnermostEHCleanup,
+ NormalEntry, NormalExit, EHEntry, EHExit);
+ if (NormalEntry)
+ InnermostNormalCleanup = stable_begin();
+ if (EHEntry)
+ InnermostEHCleanup = stable_begin();
+}
+
+void EHScopeStack::popCleanup() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ if (isa<EHLazyCleanupScope>(*begin())) {
+ EHLazyCleanupScope &Cleanup = cast<EHLazyCleanupScope>(*begin());
+ InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
+ InnermostEHCleanup = Cleanup.getEnclosingEHCleanup();
+ StartOfData += Cleanup.getAllocatedSize();
+ } else {
+ assert(isa<EHCleanupScope>(*begin()));
+ EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
+ InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
+ InnermostEHCleanup = Cleanup.getEnclosingEHCleanup();
+ StartOfData += EHCleanupScope::getSize();
+ }
+
+ // Check whether we can shrink the branch-fixups stack.
+ if (!BranchFixups.empty()) {
+ // If we no longer have any normal cleanups, all the fixups are
+ // complete.
+ if (!hasNormalCleanups())
+ BranchFixups.clear();
+
+ // Otherwise we can still trim out unnecessary nulls.
+ else
+ popNullFixups();
+ }
+}
+
+EHFilterScope *EHScopeStack::pushFilter(unsigned NumFilters) {
+ char *Buffer = allocate(EHFilterScope::getSizeForNumFilters(NumFilters));
+ CatchDepth++;
+ return new (Buffer) EHFilterScope(NumFilters);
+}
+
+void EHScopeStack::popFilter() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ EHFilterScope &Filter = cast<EHFilterScope>(*begin());
+ StartOfData += EHFilterScope::getSizeForNumFilters(Filter.getNumFilters());
+
+ assert(CatchDepth > 0 && "mismatched filter push/pop");
+ CatchDepth--;
+}
+
+EHCatchScope *EHScopeStack::pushCatch(unsigned NumHandlers) {
+ char *Buffer = allocate(EHCatchScope::getSizeForNumHandlers(NumHandlers));
+ CatchDepth++;
+ return new (Buffer) EHCatchScope(NumHandlers);
+}
+
+void EHScopeStack::pushTerminate() {
+ char *Buffer = allocate(EHTerminateScope::getSize());
+ CatchDepth++;
+ new (Buffer) EHTerminateScope();
+}
+
+/// Remove any 'null' fixups on the stack. However, we can't pop more
+/// fixups than the fixup depth on the innermost normal cleanup, or
+/// else fixups that we try to add to that cleanup will end up in the
+/// wrong place. We *could* try to shrink fixup depths, but that's
+/// actually a lot of work for little benefit.
+void EHScopeStack::popNullFixups() {
+ // We expect this to only be called when there's still an innermost
+ // normal cleanup; otherwise there really shouldn't be any fixups.
+ assert(hasNormalCleanups());
+
+ EHScopeStack::iterator it = find(InnermostNormalCleanup);
+ unsigned MinSize;
+ if (isa<EHCleanupScope>(*it))
+ MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
+ else
+ MinSize = cast<EHLazyCleanupScope>(*it).getFixupDepth();
+ assert(BranchFixups.size() >= MinSize && "fixup stack out of order");
+
+ while (BranchFixups.size() > MinSize &&
+ BranchFixups.back().Destination == 0)
+ BranchFixups.pop_back();
+}
+
+void EHScopeStack::resolveBranchFixups(llvm::BasicBlock *Dest) {
+ assert(Dest && "null block passed to resolveBranchFixups");
+
+ if (BranchFixups.empty()) return;
+ assert(hasNormalCleanups() &&
+ "branch fixups exist with no normal cleanups on stack");
+
+ for (unsigned I = 0, E = BranchFixups.size(); I != E; ++I)
+ if (BranchFixups[I].Destination == Dest)
+ BranchFixups[I].Destination = 0;
+
+ popNullFixups();
+}
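// Editor's note: an illustrative case of what a branch fixup models
// (X has a non-trivial destructor):
//
//   while (cond()) {
//     X x;           // pushes a normal cleanup for ~X()
//     if (p) break;  // must run ~X() before reaching the loop exit
//   }
//
// The break's destination is recorded as a BranchFixup; once that block is
// resolved, matching fixups are nulled and trimmed via popNullFixups().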
+
static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) {
// void *__cxa_allocate_exception(size_t thrown_size);
const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
@@ -66,8 +249,19 @@ static llvm::Constant *getReThrowFn(CodeGenFunction &CGF) {
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
}
+static llvm::Constant *getGetExceptionPtrFn(CodeGenFunction &CGF) {
+ // void *__cxa_get_exception_ptr(void*);
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(Int8PtrTy, Args, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
+}
+
static llvm::Constant *getBeginCatchFn(CodeGenFunction &CGF) {
- // void* __cxa_begin_catch();
+ // void *__cxa_begin_catch(void*);
const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
std::vector<const llvm::Type*> Args(1, Int8PtrTy);
@@ -123,25 +317,114 @@ static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
CGF.CGM.getLangOptions().CPlusPlus ? "_ZSt9terminatev" : "abort");
}
-static llvm::Constant *getPersonalityFn(CodeGenModule &CGM) {
- const char *PersonalityFnName = "__gcc_personality_v0";
- LangOptions Opts = CGM.getLangOptions();
- if (Opts.CPlusPlus)
- PersonalityFnName = "__gxx_personality_v0";
- else if (Opts.ObjC1) {
- if (Opts.NeXTRuntime) {
- if (Opts.ObjCNonFragileABI)
- PersonalityFnName = "__gcc_personality_v0";
- } else
- PersonalityFnName = "__gnu_objc_personality_v0";
+static const char *getCPersonalityFn(CodeGenFunction &CGF) {
+ return "__gcc_personality_v0";
+}
+
+static const char *getObjCPersonalityFn(CodeGenFunction &CGF) {
+ if (CGF.CGM.getLangOptions().NeXTRuntime) {
+ if (CGF.CGM.getLangOptions().ObjCNonFragileABI)
+ return "__objc_personality_v0";
+ else
+ return getCPersonalityFn(CGF);
+ } else {
+ return "__gnu_objc_personality_v0";
}
+}
+
+static const char *getCXXPersonalityFn(CodeGenFunction &CGF) {
+ if (CGF.CGM.getLangOptions().SjLjExceptions)
+ return "__gxx_personality_sj0";
+ else
+ return "__gxx_personality_v0";
+}
+
+/// Determines the personality function to use when both C++
+/// and Objective-C exceptions are being caught.
+static const char *getObjCXXPersonalityFn(CodeGenFunction &CGF) {
+ // The ObjC personality defers to the C++ personality for non-ObjC
+ // handlers. Unlike the C++ case, we use the same personality
+ // function on targets using (backend-driven) SJLJ EH.
+ if (CGF.CGM.getLangOptions().NeXTRuntime) {
+ if (CGF.CGM.getLangOptions().ObjCNonFragileABI)
+ return "__objc_personality_v0";
+
+ // In the fragile ABI, just use C++ exception handling and hope
+ // they're not doing crazy exception mixing.
+ else
+ return getCXXPersonalityFn(CGF);
+ }
+
+ // I'm pretty sure the GNU runtime doesn't support mixed EH.
+ // TODO: we don't necessarily need mixed EH here; remember what
+ // kind of exceptions we actually try to catch in this function.
+ CGF.CGM.ErrorUnsupported(CGF.CurCodeDecl,
+ "the GNU Objective C runtime does not support "
+ "catching C++ and Objective C exceptions in the "
+ "same function");
+ // Use the C++ personality just to avoid returning null.
+ return getCXXPersonalityFn(CGF);
+}
+
+static llvm::Constant *getPersonalityFn(CodeGenFunction &CGF) {
+ const char *Name;
+ const LangOptions &Opts = CGF.CGM.getLangOptions();
+ if (Opts.CPlusPlus && Opts.ObjC1)
+ Name = getObjCXXPersonalityFn(CGF);
+ else if (Opts.CPlusPlus)
+ Name = getCXXPersonalityFn(CGF);
+ else if (Opts.ObjC1)
+ Name = getObjCPersonalityFn(CGF);
+ else
+ Name = getCPersonalityFn(CGF);
llvm::Constant *Personality =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(
- CGM.getLLVMContext()),
- true),
- PersonalityFnName);
- return llvm::ConstantExpr::getBitCast(Personality, CGM.PtrToInt8Ty);
+ CGF.CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::Type::getInt32Ty(
+ CGF.CGM.getLLVMContext()),
+ true),
+ Name);
+ return llvm::ConstantExpr::getBitCast(Personality, CGF.CGM.PtrToInt8Ty);
+}
+
+/// Returns the value to inject into a selector to indicate the
+/// presence of a catch-all.
+static llvm::Constant *getCatchAllValue(CodeGenFunction &CGF) {
+ // Possibly we should use @llvm.eh.catch.all.value here.
+ return llvm::ConstantPointerNull::get(CGF.CGM.PtrToInt8Ty);
+}
+
+/// Returns the value to inject into a selector to indicate the
+/// presence of a cleanup.
+static llvm::Constant *getCleanupValue(CodeGenFunction &CGF) {
+ return llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0);
+}
+
+namespace {
+ /// A cleanup to free the exception object if its initialization
+ /// throws.
+ struct FreeExceptionCleanup : EHScopeStack::LazyCleanup {
+ FreeExceptionCleanup(llvm::Value *ShouldFreeVar,
+ llvm::Value *ExnLocVar)
+ : ShouldFreeVar(ShouldFreeVar), ExnLocVar(ExnLocVar) {}
+
+ llvm::Value *ShouldFreeVar;
+ llvm::Value *ExnLocVar;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ llvm::BasicBlock *FreeBB = CGF.createBasicBlock("free-exnobj");
+ llvm::BasicBlock *DoneBB = CGF.createBasicBlock("free-exnobj.done");
+
+ llvm::Value *ShouldFree = CGF.Builder.CreateLoad(ShouldFreeVar,
+ "should-free-exnobj");
+ CGF.Builder.CreateCondBr(ShouldFree, FreeBB, DoneBB);
+ CGF.EmitBlock(FreeBB);
+ llvm::Value *ExnLocLocal = CGF.Builder.CreateLoad(ExnLocVar, "exnobj");
+ CGF.Builder.CreateCall(getFreeExceptionFn(CGF), ExnLocLocal)
+ ->setDoesNotThrow();
+ CGF.EmitBlock(DoneBB);
+ }
+ };
}
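// Editor's note: the cleanup above covers the window where the exception
// buffer is allocated but the thrown value's initialization can still
// throw, e.g. (sketch):
//
//   struct E { E(); E(const E&); };  // copy ctor may itself throw
//   E e;
//   throw e;  // copying e into the __cxa_allocate_exception buffer can
//             // throw; the cleanup then calls __cxa_free_exception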
// Emits an exception expression into the given location. This
@@ -166,21 +449,14 @@ static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *E,
llvm::AllocaInst *ExnLocVar =
CGF.CreateTempAlloca(ExnLoc->getType(), "exnobj.var");
- llvm::BasicBlock *SavedInvokeDest = CGF.getInvokeDest();
- {
- CodeGenFunction::EHCleanupBlock Cleanup(CGF);
- llvm::BasicBlock *FreeBB = CGF.createBasicBlock("free-exnobj");
- llvm::BasicBlock *DoneBB = CGF.createBasicBlock("free-exnobj.done");
-
- llvm::Value *ShouldFree = CGF.Builder.CreateLoad(ShouldFreeVar,
- "should-free-exnobj");
- CGF.Builder.CreateCondBr(ShouldFree, FreeBB, DoneBB);
- CGF.EmitBlock(FreeBB);
- llvm::Value *ExnLocLocal = CGF.Builder.CreateLoad(ExnLocVar, "exnobj");
- CGF.Builder.CreateCall(getFreeExceptionFn(CGF), ExnLocLocal);
- CGF.EmitBlock(DoneBB);
- }
- llvm::BasicBlock *Cleanup = CGF.getInvokeDest();
+ // Make sure the exception object is cleaned up if there's an
+ // exception during initialization.
+ // FIXME: stmt expressions might require this to be a normal
+ // cleanup, too.
+ CGF.EHStack.pushLazyCleanup<FreeExceptionCleanup>(EHCleanup,
+ ShouldFreeVar,
+ ExnLocVar);
+ EHScopeStack::stable_iterator Cleanup = CGF.EHStack.stable_begin();
CGF.Builder.CreateStore(ExnLoc, ExnLocVar);
CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(CGF.getLLVMContext()),
@@ -203,74 +479,38 @@ static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *E,
CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(CGF.getLLVMContext()),
ShouldFreeVar);
- // Pop the cleanup block if it's still the top of the cleanup stack.
- // Otherwise, temporaries have been created and our cleanup will get
- // properly removed in time.
- // TODO: this is not very resilient.
- if (CGF.getInvokeDest() == Cleanup)
- CGF.setInvokeDest(SavedInvokeDest);
-}
-
-// CopyObject - Utility to copy an object. Calls copy constructor as necessary.
-// N is casted to the right type.
-static void CopyObject(CodeGenFunction &CGF, QualType ObjectType,
- bool WasPointer, bool WasPointerReference,
- llvm::Value *E, llvm::Value *N) {
- // Store the throw exception in the exception object.
- if (WasPointer || !CGF.hasAggregateLLVMType(ObjectType)) {
- llvm::Value *Value = E;
- if (!WasPointer)
- Value = CGF.Builder.CreateLoad(Value);
- const llvm::Type *ValuePtrTy = Value->getType()->getPointerTo(0);
- if (WasPointerReference) {
- llvm::Value *Tmp = CGF.CreateTempAlloca(Value->getType(), "catch.param");
- CGF.Builder.CreateStore(Value, Tmp);
- Value = Tmp;
- ValuePtrTy = Value->getType()->getPointerTo(0);
- }
- N = CGF.Builder.CreateBitCast(N, ValuePtrTy);
- CGF.Builder.CreateStore(Value, N);
- } else {
- const llvm::Type *Ty = CGF.ConvertType(ObjectType)->getPointerTo(0);
- const CXXRecordDecl *RD;
- RD = cast<CXXRecordDecl>(ObjectType->getAs<RecordType>()->getDecl());
- llvm::Value *This = CGF.Builder.CreateBitCast(N, Ty);
- if (RD->hasTrivialCopyConstructor()) {
- CGF.EmitAggregateCopy(This, E, ObjectType);
- } else if (CXXConstructorDecl *CopyCtor
- = RD->getCopyConstructor(CGF.getContext(), 0)) {
- llvm::Value *Src = E;
-
- // Stolen from EmitClassAggrMemberwiseCopy
- llvm::Value *Callee = CGF.CGM.GetAddrOfCXXConstructor(CopyCtor,
- Ctor_Complete);
- CallArgList CallArgs;
- CallArgs.push_back(std::make_pair(RValue::get(This),
- CopyCtor->getThisType(CGF.getContext())));
-
- // Push the Src ptr.
- CallArgs.push_back(std::make_pair(RValue::get(Src),
- CopyCtor->getParamDecl(0)->getType()));
-
- const FunctionProtoType *FPT
- = CopyCtor->getType()->getAs<FunctionProtoType>();
- CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(CallArgs, FPT),
- Callee, ReturnValueSlot(), CallArgs, CopyCtor);
- } else
- llvm_unreachable("uncopyable object");
+ // Technically, the exception object is like a temporary; it has to
+ // be cleaned up when its full-expression is complete.
+ // Unfortunately, the AST represents full-expressions by creating a
+ // CXXExprWithTemporaries, which it only does when there are actually
+ // temporaries.
+ //
+ // If any cleanups have been added since we pushed ours, they must
+ // be from temporaries; this will get popped at the same time.
+ // Otherwise we need to pop ours off. FIXME: this is very brittle.
+ if (Cleanup == CGF.EHStack.stable_begin())
+ CGF.PopCleanupBlock();
+}
+
+llvm::Value *CodeGenFunction::getExceptionSlot() {
+ if (!ExceptionSlot) {
+ const llvm::Type *i8p = llvm::Type::getInt8PtrTy(getLLVMContext());
+ ExceptionSlot = CreateTempAlloca(i8p, "exn.slot");
}
+ return ExceptionSlot;
}
void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
if (!E->getSubExpr()) {
if (getInvokeDest()) {
- llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- Builder.CreateInvoke(getReThrowFn(*this), Cont, getInvokeDest())
+ Builder.CreateInvoke(getReThrowFn(*this),
+ getUnreachableBlock(),
+ getInvokeDest())
->setDoesNotReturn();
- EmitBlock(Cont);
- } else
+ } else {
Builder.CreateCall(getReThrowFn(*this))->setDoesNotReturn();
- Builder.CreateUnreachable();
+ Builder.CreateUnreachable();
+ }
// Clear the insertion point to indicate we are in unreachable code.
Builder.ClearInsertionPoint();
@@ -284,10 +524,11 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(*this);
- llvm::Value *ExceptionPtr =
+ llvm::CallInst *ExceptionPtr =
Builder.CreateCall(AllocExceptionFn,
llvm::ConstantInt::get(SizeTy, TypeSize),
"exception");
+ ExceptionPtr->setDoesNotThrow();
EmitAnyExprToExn(*this, E->getSubExpr(), ExceptionPtr);
@@ -301,7 +542,7 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
if (!Record->hasTrivialDestructor()) {
- CXXDestructorDecl *DtorD = Record->getDestructor(getContext());
+ CXXDestructorDecl *DtorD = Record->getDestructor();
Dtor = CGM.GetAddrOfCXXDestructor(DtorD, Dtor_Complete);
Dtor = llvm::ConstantExpr::getBitCast(Dtor, Int8PtrTy);
}
@@ -309,18 +550,17 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
if (!Dtor) Dtor = llvm::Constant::getNullValue(Int8PtrTy);
if (getInvokeDest()) {
- llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
llvm::InvokeInst *ThrowCall =
- Builder.CreateInvoke3(getThrowFn(*this), Cont, getInvokeDest(),
+ Builder.CreateInvoke3(getThrowFn(*this),
+ getUnreachableBlock(), getInvokeDest(),
ExceptionPtr, TypeInfo, Dtor);
ThrowCall->setDoesNotReturn();
- EmitBlock(Cont);
} else {
llvm::CallInst *ThrowCall =
Builder.CreateCall3(getThrowFn(*this), ExceptionPtr, TypeInfo, Dtor);
ThrowCall->setDoesNotReturn();
+ Builder.CreateUnreachable();
}
- Builder.CreateUnreachable();
// Clear the insertion point to indicate we are in unreachable code.
Builder.ClearInsertionPoint();
@@ -346,80 +586,15 @@ void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
if (!Proto->hasExceptionSpec())
return;
- llvm::Constant *Personality = getPersonalityFn(CGM);
- llvm::Value *llvm_eh_exception =
- CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *llvm_eh_selector =
- CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
- const llvm::IntegerType *Int8Ty;
- const llvm::PointerType *PtrToInt8Ty;
- Int8Ty = llvm::Type::getInt8Ty(VMContext);
- // C string type. Used in lots of places.
- PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty);
- llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty);
- llvm::SmallVector<llvm::Value*, 8> SelectorArgs;
-
- llvm::BasicBlock *PrevLandingPad = getInvokeDest();
- llvm::BasicBlock *EHSpecHandler = createBasicBlock("ehspec.handler");
- llvm::BasicBlock *Match = createBasicBlock("match");
- llvm::BasicBlock *Unwind = 0;
-
- assert(PrevLandingPad == 0 && "EHSpec has invoke context");
- (void)PrevLandingPad;
-
- llvm::BasicBlock *Cont = createBasicBlock("cont");
-
- EmitBranchThroughCleanup(Cont);
-
- // Emit the statements in the try {} block
- setInvokeDest(EHSpecHandler);
-
- EmitBlock(EHSpecHandler);
- // Exception object
- llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
- llvm::Value *RethrowPtr = CreateTempAlloca(Exc->getType(), "_rethrow");
-
- SelectorArgs.push_back(Exc);
- SelectorArgs.push_back(Personality);
- SelectorArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- Proto->getNumExceptions()+1));
-
- for (unsigned i = 0; i < Proto->getNumExceptions(); ++i) {
- QualType Ty = Proto->getExceptionType(i);
- QualType ExceptType
- = Ty.getNonReferenceType().getUnqualifiedType();
- llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType, true);
- SelectorArgs.push_back(EHType);
- }
- if (Proto->getNumExceptions())
- SelectorArgs.push_back(Null);
-
- // Find which handler was matched.
- llvm::Value *Selector
- = Builder.CreateCall(llvm_eh_selector, SelectorArgs.begin(),
- SelectorArgs.end(), "selector");
- if (Proto->getNumExceptions()) {
- Unwind = createBasicBlock("Unwind");
-
- Builder.CreateStore(Exc, RethrowPtr);
- Builder.CreateCondBr(Builder.CreateICmpSLT(Selector,
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- 0)),
- Match, Unwind);
-
- EmitBlock(Match);
- }
- Builder.CreateCall(getUnexpectedFn(*this), Exc)->setDoesNotReturn();
- Builder.CreateUnreachable();
+ unsigned NumExceptions = Proto->getNumExceptions();
+ EHFilterScope *Filter = EHStack.pushFilter(NumExceptions);
- if (Proto->getNumExceptions()) {
- EmitBlock(Unwind);
- Builder.CreateCall(getUnwindResumeOrRethrowFn(),
- Builder.CreateLoad(RethrowPtr));
- Builder.CreateUnreachable();
+ for (unsigned I = 0; I != NumExceptions; ++I) {
+ QualType Ty = Proto->getExceptionType(I);
+ QualType ExceptType = Ty.getNonReferenceType().getUnqualifiedType();
+ llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType, true);
+ Filter->setFilter(I, EHType);
}
-
- EmitBlock(Cont);
}
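// Editor's note: a source-level sketch of the new scheme. For
//
//   void f() throw(A, B);
//
// the prologue now only pushes an EHFilterScope holding the RTTI for A and
// B; building the actual filter clause of the eh.selector call (and the
// unexpected-handler path) is deferred to landing-pad emission.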
void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
@@ -436,317 +611,936 @@ void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
if (!Proto->hasExceptionSpec())
return;
- setInvokeDest(0);
+ EHStack.popFilter();
}
void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) {
- CXXTryStmtInfo Info = EnterCXXTryStmt(S);
+ EnterCXXTryStmt(S);
EmitStmt(S.getTryBlock());
- ExitCXXTryStmt(S, Info);
+ ExitCXXTryStmt(S);
+}
+
+void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
+ unsigned NumHandlers = S.getNumHandlers();
+ EHCatchScope *CatchScope = EHStack.pushCatch(NumHandlers);
+
+ for (unsigned I = 0; I != NumHandlers; ++I) {
+ const CXXCatchStmt *C = S.getHandler(I);
+
+ llvm::BasicBlock *Handler = createBasicBlock("catch");
+ if (C->getExceptionDecl()) {
+ // FIXME: Dropping the reference type on the type info makes it
+ // impossible to correctly implement catch-by-reference
+ // semantics for pointers. Unfortunately, this is what all
+ // existing compilers do, and it's not clear that the standard
+ // personality routine is capable of doing this right. See C++ DR 388:
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#388
+ QualType CaughtType = C->getCaughtType();
+ CaughtType = CaughtType.getNonReferenceType().getUnqualifiedType();
+ llvm::Value *TypeInfo = CGM.GetAddrOfRTTIDescriptor(CaughtType, true);
+ CatchScope->setHandler(I, TypeInfo, Handler);
+ } else {
+ // No exception decl indicates '...', a catch-all.
+ CatchScope->setCatchAllHandler(I, Handler);
+ }
+ }
}
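
The handler table built by EnterCXXTryStmt pairs each catch clause with either an RTTI descriptor or a null type for a catch-all. A toy model of that layout (our types, not clang's):

    #include <cassert>
    #include <vector>

    struct ToyHandler {
      const void *TypeInfo;   // null => catch-all, mirroring setCatchAllHandler
      int Block;              // stand-in for the llvm::BasicBlock* destination
    };

    struct ToyCatchScope {
      std::vector<ToyHandler> Handlers;
      explicit ToyCatchScope(unsigned N) : Handlers(N) {}
      void setHandler(unsigned I, const void *TI, int B) { Handlers[I] = {TI, B}; }
      void setCatchAll(unsigned I, int B) { setHandler(I, nullptr, B); }
    };

    int main() {
      static const int fakeRTTI = 0;  // stands in for an RTTI descriptor
      ToyCatchScope Scope(2);
      Scope.setHandler(0, &fakeRTTI, /*block*/ 1);  // catch (T)
      Scope.setCatchAll(1, /*block*/ 2);            // catch (...)
      assert(Scope.Handlers[1].TypeInfo == nullptr);
    }
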
-CodeGenFunction::CXXTryStmtInfo
-CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S) {
- CXXTryStmtInfo Info;
- Info.SavedLandingPad = getInvokeDest();
- Info.HandlerBlock = createBasicBlock("try.handler");
- Info.FinallyBlock = createBasicBlock("finally");
+/// Check whether this is a non-EH scope, i.e. a scope which doesn't
+/// affect exception handling. Currently, the only non-EH scopes are
+/// normal-only cleanup scopes.
+static bool isNonEHScope(const EHScope &S) {
+ switch (S.getKind()) {
+ case EHScope::Cleanup:
+ return !cast<EHCleanupScope>(S).isEHCleanup();
+ case EHScope::LazyCleanup:
+ return !cast<EHLazyCleanupScope>(S).isEHCleanup();
+ case EHScope::Filter:
+ case EHScope::Catch:
+ case EHScope::Terminate:
+ return false;
+ }
- PushCleanupBlock(Info.FinallyBlock);
- setInvokeDest(Info.HandlerBlock);
+ // Suppress warning.
+ return false;
+}
- return Info;
+llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
+ assert(EHStack.requiresLandingPad());
+ assert(!EHStack.empty());
+
+ if (!Exceptions)
+ return 0;
+
+ // Check the innermost scope for a cached landing pad. If this is
+ // a non-EH cleanup, we'll check enclosing scopes in EmitLandingPad.
+ llvm::BasicBlock *LP = EHStack.begin()->getCachedLandingPad();
+ if (LP) return LP;
+
+ // Build the landing pad for this scope.
+ LP = EmitLandingPad();
+ assert(LP);
+
+ // Cache the landing pad on the innermost scope. If this is a
+ // non-EH scope, cache the landing pad on the enclosing scope, too.
+ for (EHScopeStack::iterator ir = EHStack.begin(); true; ++ir) {
+ ir->setCachedLandingPad(LP);
+ if (!isNonEHScope(*ir)) break;
+ }
+
+ return LP;
}
-void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S,
- CXXTryStmtInfo TryInfo) {
- // Pointer to the personality function
- llvm::Constant *Personality = getPersonalityFn(CGM);
- llvm::Value *llvm_eh_exception =
- CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *llvm_eh_selector =
- CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
-
- llvm::BasicBlock *PrevLandingPad = TryInfo.SavedLandingPad;
- llvm::BasicBlock *TryHandler = TryInfo.HandlerBlock;
- llvm::BasicBlock *FinallyBlock = TryInfo.FinallyBlock;
- llvm::BasicBlock *FinallyRethrow = createBasicBlock("finally.throw");
- llvm::BasicBlock *FinallyEnd = createBasicBlock("finally.end");
-
- // Jump to end if there is no exception
- EmitBranchThroughCleanup(FinallyEnd);
-
- llvm::BasicBlock *TerminateHandler = getTerminateHandler();
-
- // Emit the handlers
- EmitBlock(TryHandler);
-
- const llvm::IntegerType *Int8Ty;
- const llvm::PointerType *PtrToInt8Ty;
- Int8Ty = llvm::Type::getInt8Ty(VMContext);
- // C string type. Used in lots of places.
- PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty);
- llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty);
- llvm::SmallVector<llvm::Value*, 8> SelectorArgs;
+llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
+ assert(EHStack.requiresLandingPad());
+
+ // This function contains a hack to work around a design flaw in
+ // LLVM's EH IR which breaks semantics after inlining. This same
+ // hack is implemented in llvm-gcc.
+ //
+ // The LLVM EH abstraction is basically a thin veneer over the
+ // traditional GCC zero-cost design: for each range of instructions
+ // in the function, there is (at most) one "landing pad" with an
+ // associated chain of EH actions. A language-specific personality
+ // function interprets this chain of actions and (1) decides whether
+ // or not to resume execution at the landing pad and (2) if so,
+ // provides an integer indicating why it's stopping. In LLVM IR,
+ // the association of a landing pad with a range of instructions is
+ // achieved via an invoke instruction, the chain of actions becomes
+ // the arguments to the @llvm.eh.selector call, and the selector
+ // call returns the integer indicator. Other than the required
+ // presence of two intrinsic function calls in the landing pad,
+ // the IR exactly describes the layout of the output code.
+ //
+ // A principal advantage of this design is that it is completely
+ // language-agnostic; in theory, the LLVM optimizers can treat
+ // landing pads neutrally, and targets need only know how to lower
+ // the intrinsics to have a functioning exceptions system (assuming
+ // that platform exceptions follow something approximately like the
+ // GCC design). Unfortunately, landing pads cannot be combined in a
+ // language-agnostic way: given selectors A and B, there is no way
+ // to make a single landing pad which faithfully represents the
+ // semantics of propagating an exception first through A, then
+ // through B, without knowing how the personality will interpret the
+ // (lowered form of the) selectors. This means that inlining has no
+ // choice but to crudely chain invokes (i.e., to ignore invokes in
+ // the inlined function, but to turn all unwindable calls into
+ // invokes), which is only semantically valid if every unwind stops
+ // at every landing pad.
+ //
+ // Therefore, the invoke-inline hack is to guarantee that every
+ // landing pad has a catch-all.
+ const bool UseInvokeInlineHack = true;
+
+ for (EHScopeStack::iterator ir = EHStack.begin(); ; ) {
+ assert(ir != EHStack.end() &&
+ "stack requiring landing pad is nothing but non-EH scopes?");
+
+ // If this is a terminate scope, just use the singleton terminate
+ // landing pad.
+ if (isa<EHTerminateScope>(*ir))
+ return getTerminateLandingPad();
+
+ // If this isn't an EH scope, iterate; otherwise break out.
+ if (!isNonEHScope(*ir)) break;
+ ++ir;
+
+ // We haven't checked this scope for a cached landing pad yet.
+ if (llvm::BasicBlock *LP = ir->getCachedLandingPad())
+ return LP;
+ }
+
+ // Save the current IR generation state.
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ // Create and configure the landing pad.
+ llvm::BasicBlock *LP = createBasicBlock("lpad");
+ EmitBlock(LP);
+
+ // Save the exception pointer. It's safe to use a single exception
+ // pointer per function because EH cleanups can never have nested
+ // try/catches.
+ llvm::CallInst *Exn =
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_exception), "exn");
+ Exn->setDoesNotThrow();
+ Builder.CreateStore(Exn, getExceptionSlot());
+
+ // Build the selector arguments.
+ llvm::SmallVector<llvm::Value*, 8> EHSelector;
+ EHSelector.push_back(Exn);
+ EHSelector.push_back(getPersonalityFn(*this));
+
+ // Accumulate all the handlers in scope.
+ llvm::DenseMap<llvm::Value*, JumpDest> EHHandlers;
+ JumpDest CatchAll;
+ bool HasEHCleanup = false;
+ bool HasEHFilter = false;
+ llvm::SmallVector<llvm::Value*, 8> EHFilters;
+ for (EHScopeStack::iterator I = EHStack.begin(), E = EHStack.end();
+ I != E; ++I) {
+
+ switch (I->getKind()) {
+ case EHScope::LazyCleanup:
+ if (!HasEHCleanup)
+ HasEHCleanup = cast<EHLazyCleanupScope>(*I).isEHCleanup();
+ // We otherwise don't care about cleanups.
+ continue;
+
+ case EHScope::Cleanup:
+ if (!HasEHCleanup)
+ HasEHCleanup = cast<EHCleanupScope>(*I).isEHCleanup();
+ // We otherwise don't care about cleanups.
+ continue;
+
+ case EHScope::Filter: {
+ assert(I.next() == EHStack.end() && "EH filter is not end of EH stack");
+ assert(!CatchAll.Block && "EH filter reached after catch-all");
+
+      // Filter scopes get added to the selector in weird ways.
+ EHFilterScope &Filter = cast<EHFilterScope>(*I);
+ HasEHFilter = true;
+
+ // Add all the filter values which we aren't already explicitly
+ // catching.
+ for (unsigned I = 0, E = Filter.getNumFilters(); I != E; ++I) {
+ llvm::Value *FV = Filter.getFilter(I);
+ if (!EHHandlers.count(FV))
+ EHFilters.push_back(FV);
+ }
+ goto done;
+ }
+
+ case EHScope::Terminate:
+ // Terminate scopes are basically catch-alls.
+ assert(!CatchAll.Block);
+ CatchAll.Block = getTerminateHandler();
+ CatchAll.ScopeDepth = EHStack.getEnclosingEHCleanup(I);
+ goto done;
+
+ case EHScope::Catch:
+ break;
+ }
+
+ EHCatchScope &Catch = cast<EHCatchScope>(*I);
+ for (unsigned HI = 0, HE = Catch.getNumHandlers(); HI != HE; ++HI) {
+ EHCatchScope::Handler Handler = Catch.getHandler(HI);
+
+ // Catch-all. We should only have one of these per catch.
+ if (!Handler.Type) {
+ assert(!CatchAll.Block);
+ CatchAll.Block = Handler.Block;
+ CatchAll.ScopeDepth = EHStack.getEnclosingEHCleanup(I);
+ continue;
+ }
+
+ // Check whether we already have a handler for this type.
+ JumpDest &Dest = EHHandlers[Handler.Type];
+ if (Dest.Block) continue;
+
+ EHSelector.push_back(Handler.Type);
+ Dest.Block = Handler.Block;
+ Dest.ScopeDepth = EHStack.getEnclosingEHCleanup(I);
+ }
+
+ // Stop if we found a catch-all.
+ if (CatchAll.Block) break;
+ }
+
+ done:
+ unsigned LastToEmitInLoop = EHSelector.size();
+
+ // If we have a catch-all, add null to the selector.
+ if (CatchAll.Block) {
+    EHSelector.push_back(getCatchAllValue(*this));
+
+ // If we have an EH filter, we need to add those handlers in the
+ // right place in the selector, which is to say, at the end.
+ } else if (HasEHFilter) {
+ // Create a filter expression: an integer constant saying how many
+ // filters there are (+1 to avoid ambiguity with 0 for cleanup),
+ // followed by the filter types. The personality routine only
+ // lands here if the filter doesn't match.
+ EHSelector.push_back(llvm::ConstantInt::get(Builder.getInt32Ty(),
+ EHFilters.size() + 1));
+ EHSelector.append(EHFilters.begin(), EHFilters.end());
+
+ // Also check whether we need a cleanup.
+ if (UseInvokeInlineHack || HasEHCleanup)
+      EHSelector.push_back(UseInvokeInlineHack
+                           ? getCatchAllValue(*this)
+                           : getCleanupValue(*this));
+
+ // Otherwise, signal that we at least have cleanups.
+ } else if (UseInvokeInlineHack || HasEHCleanup) {
+    EHSelector.push_back(UseInvokeInlineHack
+                         ? getCatchAllValue(*this)
+                         : getCleanupValue(*this));
+ } else {
+ assert(LastToEmitInLoop > 2);
+ LastToEmitInLoop--;
+ }
+
+ assert(EHSelector.size() >= 3 && "selector call has only two arguments!");
+
+ // Tell the backend how to generate the landing pad.
+ llvm::CallInst *Selection =
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_selector),
+ EHSelector.begin(), EHSelector.end(), "eh.selector");
+ Selection->setDoesNotThrow();
+
+ // Select the right handler.
llvm::Value *llvm_eh_typeid_for =
CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
- // Exception object
- llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
- llvm::Value *RethrowPtr = CreateTempAlloca(Exc->getType(), "_rethrow");
-
- SelectorArgs.push_back(Exc);
- SelectorArgs.push_back(Personality);
-
- bool HasCatchAll = false;
- for (unsigned i = 0; i<S.getNumHandlers(); ++i) {
- const CXXCatchStmt *C = S.getHandler(i);
- VarDecl *CatchParam = C->getExceptionDecl();
- if (CatchParam) {
- // C++ [except.handle]p3 indicates that top-level cv-qualifiers
- // are ignored.
- QualType CaughtType = C->getCaughtType().getNonReferenceType();
- llvm::Value *EHTypeInfo
- = CGM.GetAddrOfRTTIDescriptor(CaughtType.getUnqualifiedType(), true);
- SelectorArgs.push_back(EHTypeInfo);
+
+ // The results of llvm_eh_typeid_for aren't reliable --- at least
+ // not locally --- so we basically have to do this as an 'if' chain.
+ // We walk through the first N-1 catch clauses, testing and chaining,
+ // and then fall into the final clause (which is either a cleanup, a
+ // filter (possibly with a cleanup), a catch-all, or another catch).
+ for (unsigned I = 2; I != LastToEmitInLoop; ++I) {
+ llvm::Value *Type = EHSelector[I];
+ JumpDest Dest = EHHandlers[Type];
+ assert(Dest.Block && "no handler entry for value in selector?");
+
+ // Figure out where to branch on a match. As a debug code-size
+ // optimization, if the scope depth matches the innermost cleanup,
+ // we branch directly to the catch handler.
+ llvm::BasicBlock *Match = Dest.Block;
+ bool MatchNeedsCleanup = Dest.ScopeDepth != EHStack.getInnermostEHCleanup();
+ if (MatchNeedsCleanup)
+ Match = createBasicBlock("eh.match");
+
+ llvm::BasicBlock *Next = createBasicBlock("eh.next");
+
+ // Check whether the exception matches.
+ llvm::CallInst *Id
+ = Builder.CreateCall(llvm_eh_typeid_for,
+ Builder.CreateBitCast(Type, CGM.PtrToInt8Ty));
+ Id->setDoesNotThrow();
+ Builder.CreateCondBr(Builder.CreateICmpEQ(Selection, Id),
+ Match, Next);
+
+ // Emit match code if necessary.
+ if (MatchNeedsCleanup) {
+ EmitBlock(Match);
+ EmitBranchThroughEHCleanup(Dest);
+ }
+
+ // Continue to the next match.
+ EmitBlock(Next);
+ }
+
+ // Emit the final case in the selector.
+ // This might be a catch-all....
+ if (CatchAll.Block) {
+ assert(isa<llvm::ConstantPointerNull>(EHSelector.back()));
+ EmitBranchThroughEHCleanup(CatchAll);
+
+ // ...or an EH filter...
+ } else if (HasEHFilter) {
+ llvm::Value *SavedSelection = Selection;
+
+ // First, unwind out to the outermost scope if necessary.
+ if (EHStack.hasEHCleanups()) {
+ // The end here might not dominate the beginning, so we might need to
+ // save the selector if we need it.
+ llvm::AllocaInst *SelectorVar = 0;
+ if (HasEHCleanup) {
+ SelectorVar = CreateTempAlloca(Builder.getInt32Ty(), "selector.var");
+ Builder.CreateStore(Selection, SelectorVar);
+ }
+
+ llvm::BasicBlock *CleanupContBB = createBasicBlock("ehspec.cleanup.cont");
+ EmitBranchThroughEHCleanup(JumpDest(CleanupContBB, EHStack.stable_end()));
+ EmitBlock(CleanupContBB);
+
+ if (HasEHCleanup)
+ SavedSelection = Builder.CreateLoad(SelectorVar, "ehspec.saved-selector");
+ }
+
+ // If there was a cleanup, we'll need to actually check whether we
+ // landed here because the filter triggered.
+ if (UseInvokeInlineHack || HasEHCleanup) {
+ llvm::BasicBlock *RethrowBB = createBasicBlock("cleanup");
+ llvm::BasicBlock *UnexpectedBB = createBasicBlock("ehspec.unexpected");
+
+ llvm::Constant *Zero = llvm::ConstantInt::get(Builder.getInt32Ty(), 0);
+ llvm::Value *FailsFilter =
+ Builder.CreateICmpSLT(SavedSelection, Zero, "ehspec.fails");
+ Builder.CreateCondBr(FailsFilter, UnexpectedBB, RethrowBB);
+
+ // The rethrow block is where we land if this was a cleanup.
+ // TODO: can this be _Unwind_Resume if the InvokeInlineHack is off?
+ EmitBlock(RethrowBB);
+ Builder.CreateCall(getUnwindResumeOrRethrowFn(),
+ Builder.CreateLoad(getExceptionSlot()))
+ ->setDoesNotReturn();
+ Builder.CreateUnreachable();
+
+ EmitBlock(UnexpectedBB);
+ }
+
+ // Call __cxa_call_unexpected. This doesn't need to be an invoke
+ // because __cxa_call_unexpected magically filters exceptions
+ // according to the last landing pad the exception was thrown
+ // into. Seriously.
+ Builder.CreateCall(getUnexpectedFn(*this),
+ Builder.CreateLoad(getExceptionSlot()))
+ ->setDoesNotReturn();
+ Builder.CreateUnreachable();
+
+ // ...or a normal catch handler...
+ } else if (!UseInvokeInlineHack && !HasEHCleanup) {
+ llvm::Value *Type = EHSelector.back();
+ EmitBranchThroughEHCleanup(EHHandlers[Type]);
+
+ // ...or a cleanup.
+ } else {
+ // We emit a jump to a notional label at the outermost unwind state.
+ llvm::BasicBlock *Unwind = createBasicBlock("eh.resume");
+ JumpDest Dest(Unwind, EHStack.stable_end());
+ EmitBranchThroughEHCleanup(Dest);
+
+ // The unwind block. We have to reload the exception here because
+ // we might have unwound through arbitrary blocks, so the landing
+ // pad might not dominate.
+ EmitBlock(Unwind);
+
+ // This can always be a call because we necessarily didn't find
+ // anything on the EH stack which needs our help.
+ Builder.CreateCall(getUnwindResumeOrRethrowFn(),
+ Builder.CreateLoad(getExceptionSlot()))
+ ->setDoesNotReturn();
+ Builder.CreateUnreachable();
+ }
+
+ // Restore the old IR generation state.
+ Builder.restoreIP(SavedIP);
+
+ return LP;
+}
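
The dispatch chain above leans on the selector-value conventions of the zero-cost scheme: a positive index for a matched catch type, zero when only cleanups apply, and a negative value when an exception specification's filter fails (hence the ICmpSLT against zero). A toy model of those conventions, using pointer identity in place of the runtime's real can-catch rules:

    #include <cassert>
    #include <vector>

    // Returns  >0 : 1-based index of the matching catch type,
    //           0 : no catch matched, only cleanups to run,
    //          <0 : the exception violates the function's filter.
    static int toySelector(const void *thrownType,
                           const std::vector<const void *> &catchTypes,
                           const std::vector<const void *> &filterTypes,
                           bool hasFilter) {
      for (unsigned i = 0; i != catchTypes.size(); ++i)
        if (catchTypes[i] == thrownType || catchTypes[i] == nullptr)
          return int(i) + 1;     // what llvm.eh.typeid.for is compared against
      if (hasFilter) {
        for (const void *t : filterTypes)
          if (t == thrownType)
            return 0;            // allowed through: run cleanups, keep unwinding
        return -1;               // fails the filter: the ehspec.unexpected path
      }
      return 0;                  // cleanup-only landing
    }

    int main() {
      static const int A = 0, B = 0;
      assert(toySelector(&A, {&A, &B}, {}, false) == 1);
      assert(toySelector(&B, {&A}, {&B}, true) == 0);
      assert(toySelector(&B, {&A}, {}, true) < 0);
    }
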
+
+namespace {
+ /// A cleanup to call __cxa_end_catch. In many cases, the caught
+ /// exception type lets us state definitively that the thrown exception
+ /// type does not have a destructor. In particular:
+ /// - Catch-alls tell us nothing, so we have to conservatively
+ /// assume that the thrown exception might have a destructor.
+ /// - Catches by reference behave according to their base types.
+ /// - Catches of non-record types will only trigger for exceptions
+ /// of non-record types, which never have destructors.
+ /// - Catches of record types can trigger for arbitrary subclasses
+ /// of the caught type, so we have to assume the actual thrown
+ /// exception type might have a throwing destructor, even if the
+ /// caught type's destructor is trivial or nothrow.
+ struct CallEndCatch : EHScopeStack::LazyCleanup {
+ CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
+ bool MightThrow;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ if (!MightThrow) {
+ CGF.Builder.CreateCall(getEndCatchFn(CGF))->setDoesNotThrow();
+ return;
+ }
+
+ CGF.EmitCallOrInvoke(getEndCatchFn(CGF), 0, 0);
+ }
+ };
+}
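
The CallEndCatch cleanup above guarantees that every __cxa_begin_catch is paired with a __cxa_end_catch on every exit path. In plain C++ the same discipline is an RAII guard; the runtime entry points are stubbed out here purely for illustration:

    #include <cstdio>

    // Stubs standing in for the Itanium C++ ABI entry points.
    static void *cxaBeginCatchStub(void *exn) {
      std::puts("__cxa_begin_catch");
      return exn;                 // the runtime returns the adjusted pointer
    }
    static void cxaEndCatchStub() { std::puts("__cxa_end_catch"); }

    class CatchGuard {
      void *Adjusted;
    public:
      explicit CatchGuard(void *exn) : Adjusted(cxaBeginCatchStub(exn)) {}
      void *get() const { return Adjusted; }
      ~CatchGuard() { cxaEndCatchStub(); }  // runs on every exit path
    };

    int main() {
      int fakeExn = 0;
      CatchGuard guard(&fakeExn);           // entering the handler
      std::puts("handler body");
    }                                       // __cxa_end_catch prints last
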
+
+/// Emits a call to __cxa_begin_catch and enters a cleanup to call
+/// __cxa_end_catch.
+///
+/// \param EndMightThrow - true if __cxa_end_catch might throw
+static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
+ llvm::Value *Exn,
+ bool EndMightThrow) {
+ llvm::CallInst *Call = CGF.Builder.CreateCall(getBeginCatchFn(CGF), Exn);
+ Call->setDoesNotThrow();
+
+ CGF.EHStack.pushLazyCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
+
+ return Call;
+}
+
+/// A "special initializer" callback for initializing a catch
+/// parameter during catch initialization.
+static void InitCatchParam(CodeGenFunction &CGF,
+ const VarDecl &CatchParam,
+ llvm::Value *ParamAddr) {
+ // Load the exception from where the landing pad saved it.
+ llvm::Value *Exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot(), "exn");
+
+ CanQualType CatchType =
+ CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
+ const llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
+
+ // If we're catching by reference, we can just cast the object
+ // pointer to the appropriate pointer.
+ if (isa<ReferenceType>(CatchType)) {
+ bool EndCatchMightThrow = cast<ReferenceType>(CatchType)->getPointeeType()
+ ->isRecordType();
+
+ // __cxa_begin_catch returns the adjusted object pointer.
+ llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
+ llvm::Value *ExnCast =
+ CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
+ CGF.Builder.CreateStore(ExnCast, ParamAddr);
+ return;
+ }
+
+ // Non-aggregates (plus complexes).
+ bool IsComplex = false;
+ if (!CGF.hasAggregateLLVMType(CatchType) ||
+ (IsComplex = CatchType->isAnyComplexType())) {
+ llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
+
+ // If the catch type is a pointer type, __cxa_begin_catch returns
+ // the pointer by value.
+ if (CatchType->hasPointerRepresentation()) {
+ llvm::Value *CastExn =
+ CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
+ CGF.Builder.CreateStore(CastExn, ParamAddr);
+ return;
+ }
+
+ // Otherwise, it returns a pointer into the exception object.
+
+ const llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
+ llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
+
+ if (IsComplex) {
+ CGF.StoreComplexToAddr(CGF.LoadComplexFromAddr(Cast, /*volatile*/ false),
+ ParamAddr, /*volatile*/ false);
} else {
- // null indicates catch all
- SelectorArgs.push_back(Null);
- HasCatchAll = true;
+ llvm::Value *ExnLoad = CGF.Builder.CreateLoad(Cast, "exn.scalar");
+ CGF.EmitStoreOfScalar(ExnLoad, ParamAddr, /*volatile*/ false, CatchType);
}
+ return;
}
- // We use a cleanup unless there was already a catch all.
- if (!HasCatchAll) {
- SelectorArgs.push_back(Null);
+ // FIXME: this *really* needs to be done via a proper, Sema-emitted
+ // initializer expression.
+
+ CXXRecordDecl *RD = CatchType.getTypePtr()->getAsCXXRecordDecl();
+ assert(RD && "aggregate catch type was not a record!");
+
+ const llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
+
+ if (RD->hasTrivialCopyConstructor()) {
+ llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, true);
+ llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
+ CGF.EmitAggregateCopy(ParamAddr, Cast, CatchType);
+ return;
}
- // Find which handler was matched.
- llvm::Value *Selector
- = Builder.CreateCall(llvm_eh_selector, SelectorArgs.begin(),
- SelectorArgs.end(), "selector");
- for (unsigned i = 0; i<S.getNumHandlers(); ++i) {
- const CXXCatchStmt *C = S.getHandler(i);
- VarDecl *CatchParam = C->getExceptionDecl();
- Stmt *CatchBody = C->getHandlerBlock();
-
- llvm::BasicBlock *Next = 0;
-
- if (SelectorArgs[i+2] != Null) {
- llvm::BasicBlock *Match = createBasicBlock("match");
- Next = createBasicBlock("catch.next");
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
- llvm::Value *Id
- = Builder.CreateCall(llvm_eh_typeid_for,
- Builder.CreateBitCast(SelectorArgs[i+2],
- Int8PtrTy));
- Builder.CreateCondBr(Builder.CreateICmpEQ(Selector, Id),
- Match, Next);
- EmitBlock(Match);
+ // We have to call __cxa_get_exception_ptr to get the adjusted
+ // pointer before copying.
+ llvm::CallInst *AdjustedExn =
+ CGF.Builder.CreateCall(getGetExceptionPtrFn(CGF), Exn);
+ AdjustedExn->setDoesNotThrow();
+ llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
+
+ CXXConstructorDecl *CD = RD->getCopyConstructor(CGF.getContext(), 0);
+ assert(CD && "record has no copy constructor!");
+ llvm::Value *CopyCtor = CGF.CGM.GetAddrOfCXXConstructor(CD, Ctor_Complete);
+
+ CallArgList CallArgs;
+ CallArgs.push_back(std::make_pair(RValue::get(ParamAddr),
+ CD->getThisType(CGF.getContext())));
+ CallArgs.push_back(std::make_pair(RValue::get(Cast),
+ CD->getParamDecl(0)->getType()));
+
+ const FunctionProtoType *FPT
+ = CD->getType()->getAs<FunctionProtoType>();
+
+ // Call the copy ctor in a terminate scope.
+ CGF.EHStack.pushTerminate();
+ CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(CallArgs, FPT),
+ CopyCtor, ReturnValueSlot(), CallArgs, CD);
+ CGF.EHStack.popTerminate();
+
+ // Finally we can call __cxa_begin_catch.
+ CallBeginCatch(CGF, Exn, true);
+}
+
+/// Begins a catch statement by initializing the catch variable and
+/// calling __cxa_begin_catch.
+static void BeginCatch(CodeGenFunction &CGF,
+ const CXXCatchStmt *S) {
+ // We have to be very careful with the ordering of cleanups here:
+ // C++ [except.throw]p4:
+ // The destruction [of the exception temporary] occurs
+ // immediately after the destruction of the object declared in
+ // the exception-declaration in the handler.
+ //
+ // So the precise ordering is:
+ // 1. Construct catch variable.
+ // 2. __cxa_begin_catch
+ // 3. Enter __cxa_end_catch cleanup
+ // 4. Enter dtor cleanup
+ //
+ // We do this by initializing the exception variable with a
+ // "special initializer", InitCatchParam. Delegation sequence:
+ // - ExitCXXTryStmt opens a RunCleanupsScope
+ // - EmitLocalBlockVarDecl creates the variable and debug info
+ // - InitCatchParam initializes the variable from the exception
+ // - CallBeginCatch calls __cxa_begin_catch
+ // - CallBeginCatch enters the __cxa_end_catch cleanup
+ // - EmitLocalBlockVarDecl enters the variable destructor cleanup
+ // - EmitCXXTryStmt emits the code for the catch body
+  // - EmitCXXTryStmt closes the RunCleanupsScope
+
+ VarDecl *CatchParam = S->getExceptionDecl();
+ if (!CatchParam) {
+ llvm::Value *Exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot(), "exn");
+ CallBeginCatch(CGF, Exn, true);
+ return;
+ }
+
+ // Emit the local.
+ CGF.EmitLocalBlockVarDecl(*CatchParam, &InitCatchParam);
+}
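
The ordering spelled out in the comment above can be observed directly: cleanups pop in LIFO order, so the catch variable's destructor runs first and the end-catch (which destroys the exception temporary) runs immediately afterwards. A toy demonstration:

    #include <cstdio>

    struct Step {
      const char *Name;
      explicit Step(const char *N) : Name(N) { std::printf("enter: %s\n", Name); }
      ~Step() { std::printf("exit:  %s\n", Name); }
    };

    int main() {
      // Mirrors steps 2-4 above: cleanups pop in reverse, so the catch
      // variable's destructor runs first, then __cxa_end_catch (which
      // destroys the exception temporary) immediately afterwards.
      Step endCatch("__cxa_end_catch cleanup");
      Step dtor("catch variable destructor");
      std::puts("catch body");
    }
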
+
+namespace {
+ struct CallRethrow : EHScopeStack::LazyCleanup {
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ CGF.EmitCallOrInvoke(getReThrowFn(CGF), 0, 0);
+ }
+ };
+}
+
+void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
+ unsigned NumHandlers = S.getNumHandlers();
+ EHCatchScope &CatchScope = cast<EHCatchScope>(*EHStack.begin());
+ assert(CatchScope.getNumHandlers() == NumHandlers);
+
+ // Copy the handler blocks off before we pop the EH stack. Emitting
+ // the handlers might scribble on this memory.
+ llvm::SmallVector<EHCatchScope::Handler, 8> Handlers(NumHandlers);
+ memcpy(Handlers.data(), CatchScope.begin(),
+ NumHandlers * sizeof(EHCatchScope::Handler));
+ EHStack.popCatch();
+
+ // The fall-through block.
+ llvm::BasicBlock *ContBB = createBasicBlock("try.cont");
+
+ // We just emitted the body of the try; jump to the continue block.
+ if (HaveInsertPoint())
+ Builder.CreateBr(ContBB);
+
+ // Determine if we need an implicit rethrow for all these catch handlers.
+ bool ImplicitRethrow = false;
+ if (IsFnTryBlock)
+ ImplicitRethrow = isa<CXXDestructorDecl>(CurCodeDecl) ||
+ isa<CXXConstructorDecl>(CurCodeDecl);
+
+ for (unsigned I = 0; I != NumHandlers; ++I) {
+ llvm::BasicBlock *CatchBlock = Handlers[I].Block;
+ EmitBlock(CatchBlock);
+
+ // Catch the exception if this isn't a catch-all.
+ const CXXCatchStmt *C = S.getHandler(I);
+
+ // Enter a cleanup scope, including the catch variable and the
+ // end-catch.
+ RunCleanupsScope CatchScope(*this);
+
+ // Initialize the catch variable and set up the cleanups.
+ BeginCatch(*this, C);
+
+ // If there's an implicit rethrow, push a normal "cleanup" to call
+    // __cxa_rethrow. This needs to happen before __cxa_end_catch is
+ // called, and so it is pushed after BeginCatch.
+ if (ImplicitRethrow)
+ EHStack.pushLazyCleanup<CallRethrow>(NormalCleanup);
+
+ // Perform the body of the catch.
+ EmitStmt(C->getHandlerBlock());
+
+ // Fall out through the catch cleanups.
+ CatchScope.ForceCleanup();
+
+ // Branch out of the try.
+ if (HaveInsertPoint())
+ Builder.CreateBr(ContBB);
+ }
+
+ EmitBlock(ContBB);
+}
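
The ImplicitRethrow case matches the language rule that flowing off the end of a handler in a constructor's or destructor's function-try-block rethrows the current exception instead of returning normally. For example:

    #include <cstdio>

    struct Member {
      Member() { throw 42; }
    };

    struct S {
      Member m;
      S() try : m() {
      } catch (...) {
        std::puts("ctor handler");  // falling off the end rethrows automatically
      }
    };

    int main() {
      try {
        S s;
      } catch (int) {
        std::puts("caught the rethrown exception");  // this line is reached
      }
    }
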
+
+/// Enters a finally block for an implementation using zero-cost
+/// exceptions. This is mostly general, but hard-codes some
+/// language/ABI-specific behavior in the catch-all sections.
+CodeGenFunction::FinallyInfo
+CodeGenFunction::EnterFinallyBlock(const Stmt *Body,
+ llvm::Constant *BeginCatchFn,
+ llvm::Constant *EndCatchFn,
+ llvm::Constant *RethrowFn) {
+ assert((BeginCatchFn != 0) == (EndCatchFn != 0) &&
+ "begin/end catch functions not paired");
+ assert(RethrowFn && "rethrow function is required");
+
+ // The rethrow function has one of the following two types:
+ // void (*)()
+ // void (*)(void*)
+ // In the latter case we need to pass it the exception object.
+ // But we can't use the exception slot because the @finally might
+ // have a landing pad (which would overwrite the exception slot).
+ const llvm::FunctionType *RethrowFnTy =
+ cast<llvm::FunctionType>(
+ cast<llvm::PointerType>(RethrowFn->getType())
+ ->getElementType());
+ llvm::Value *SavedExnVar = 0;
+ if (RethrowFnTy->getNumParams())
+ SavedExnVar = CreateTempAlloca(Builder.getInt8PtrTy(), "finally.exn");
+
+ // A finally block is a statement which must be executed on any edge
+ // out of a given scope. Unlike a cleanup, the finally block may
+ // contain arbitrary control flow leading out of itself. In
+ // addition, finally blocks should always be executed, even if there
+ // are no catch handlers higher on the stack. Therefore, we
+ // surround the protected scope with a combination of a normal
+ // cleanup (to catch attempts to break out of the block via normal
+ // control flow) and an EH catch-all (semantically "outside" any try
+ // statement to which the finally block might have been attached).
+ // The finally block itself is generated in the context of a cleanup
+ // which conditionally leaves the catch-all.
+
+ FinallyInfo Info;
+
+ // Jump destination for performing the finally block on an exception
+ // edge. We'll never actually reach this block, so unreachable is
+ // fine.
+ JumpDest RethrowDest = getJumpDestInCurrentScope(getUnreachableBlock());
+
+ // Whether the finally block is being executed for EH purposes.
+  llvm::AllocaInst *ForEHVar = CreateTempAlloca(Builder.getInt1Ty(),
+ "finally.for-eh");
+ InitTempAlloca(ForEHVar, llvm::ConstantInt::getFalse(getLLVMContext()));
+
+ // Enter a normal cleanup which will perform the @finally block.
+ {
+ CodeGenFunction::CleanupBlock Cleanup(*this, NormalCleanup);
+
+ // Enter a cleanup to call the end-catch function if one was provided.
+ if (EndCatchFn) {
+      CodeGenFunction::CleanupBlock FinallyExitCleanup(*this, NormalAndEHCleanup);
+
+ llvm::BasicBlock *EndCatchBB = createBasicBlock("finally.endcatch");
+ llvm::BasicBlock *CleanupContBB = createBasicBlock("finally.cleanup.cont");
+
+ llvm::Value *ShouldEndCatch =
+ Builder.CreateLoad(ForEHVar, "finally.endcatch");
+ Builder.CreateCondBr(ShouldEndCatch, EndCatchBB, CleanupContBB);
+ EmitBlock(EndCatchBB);
+ EmitCallOrInvoke(EndCatchFn, 0, 0); // catch-all, so might throw
+ EmitBlock(CleanupContBB);
}
- llvm::BasicBlock *MatchEnd = createBasicBlock("match.end");
- llvm::BasicBlock *MatchHandler = createBasicBlock("match.handler");
-
- PushCleanupBlock(MatchEnd);
- setInvokeDest(MatchHandler);
-
- llvm::Value *ExcObject = Builder.CreateCall(getBeginCatchFn(*this), Exc);
-
- {
- CleanupScope CatchScope(*this);
- // Bind the catch parameter if it exists.
- if (CatchParam) {
- QualType CatchType = CatchParam->getType().getNonReferenceType();
- setInvokeDest(TerminateHandler);
- bool WasPointer = true;
- bool WasPointerReference = false;
- CatchType = CGM.getContext().getCanonicalType(CatchType);
- if (CatchType.getTypePtr()->isPointerType()) {
- if (isa<ReferenceType>(CatchParam->getType()))
- WasPointerReference = true;
- } else {
- if (!isa<ReferenceType>(CatchParam->getType()))
- WasPointer = false;
- CatchType = getContext().getPointerType(CatchType);
- }
- ExcObject = Builder.CreateBitCast(ExcObject, ConvertType(CatchType));
- EmitLocalBlockVarDecl(*CatchParam);
- // FIXME: we need to do this sooner so that the EH region for the
- // cleanup doesn't start until after the ctor completes, use a decl
- // init?
- CopyObject(*this, CatchParam->getType().getNonReferenceType(),
- WasPointer, WasPointerReference, ExcObject,
- GetAddrOfLocalVar(CatchParam));
- setInvokeDest(MatchHandler);
+ // Emit the finally block.
+ EmitStmt(Body);
+
+ // If the end of the finally is reachable, check whether this was
+ // for EH. If so, rethrow.
+ if (HaveInsertPoint()) {
+ llvm::BasicBlock *RethrowBB = createBasicBlock("finally.rethrow");
+ llvm::BasicBlock *ContBB = createBasicBlock("finally.cont");
+
+ llvm::Value *ShouldRethrow =
+ Builder.CreateLoad(ForEHVar, "finally.shouldthrow");
+ Builder.CreateCondBr(ShouldRethrow, RethrowBB, ContBB);
+
+ EmitBlock(RethrowBB);
+ if (SavedExnVar) {
+ llvm::Value *Args[] = { Builder.CreateLoad(SavedExnVar) };
+ EmitCallOrInvoke(RethrowFn, Args, Args+1);
+ } else {
+ EmitCallOrInvoke(RethrowFn, 0, 0);
}
+ Builder.CreateUnreachable();
- EmitStmt(CatchBody);
+ EmitBlock(ContBB);
}
- EmitBranchThroughCleanup(FinallyEnd);
-
- EmitBlock(MatchHandler);
-
- llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
- // We are required to emit this call to satisfy LLVM, even
- // though we don't use the result.
- llvm::Value *Args[] = {
- Exc, Personality,
- llvm::ConstantInt::getNullValue(llvm::Type::getInt32Ty(VMContext))
- };
- Builder.CreateCall(llvm_eh_selector, &Args[0], llvm::array_endof(Args));
- Builder.CreateStore(Exc, RethrowPtr);
- EmitBranchThroughCleanup(FinallyRethrow);
-
- CodeGenFunction::CleanupBlockInfo Info = PopCleanupBlock();
-
- EmitBlock(MatchEnd);
-
- llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- Builder.CreateInvoke(getEndCatchFn(*this),
- Cont, TerminateHandler,
- &Args[0], &Args[0]);
- EmitBlock(Cont);
- if (Info.SwitchBlock)
- EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- EmitBlock(Info.EndBlock);
-
- Exc = Builder.CreateCall(llvm_eh_exception, "exc");
- Builder.CreateStore(Exc, RethrowPtr);
- EmitBranchThroughCleanup(FinallyRethrow);
-
- if (Next)
- EmitBlock(Next);
+ // Leave the end-catch cleanup. As an optimization, pretend that
+ // the fallthrough path was inaccessible; we've dynamically proven
+ // that we're not in the EH case along that path.
+ if (EndCatchFn) {
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+ PopCleanupBlock();
+ Builder.restoreIP(SavedIP);
+ }
+
+ // Now make sure we actually have an insertion point or the
+ // cleanup gods will hate us.
+ EnsureInsertPoint();
}
- if (!HasCatchAll) {
- Builder.CreateStore(Exc, RethrowPtr);
- EmitBranchThroughCleanup(FinallyRethrow);
+
+ // Enter a catch-all scope.
+ llvm::BasicBlock *CatchAllBB = createBasicBlock("finally.catchall");
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveIP();
+ Builder.SetInsertPoint(CatchAllBB);
+
+ // If there's a begin-catch function, call it.
+ if (BeginCatchFn) {
+ Builder.CreateCall(BeginCatchFn, Builder.CreateLoad(getExceptionSlot()))
+ ->setDoesNotThrow();
}
- CodeGenFunction::CleanupBlockInfo Info = PopCleanupBlock();
+ // If we need to remember the exception pointer to rethrow later, do so.
+ if (SavedExnVar) {
+ llvm::Value *SavedExn = Builder.CreateLoad(getExceptionSlot());
+ Builder.CreateStore(SavedExn, SavedExnVar);
+ }
- setInvokeDest(PrevLandingPad);
+ // Tell the finally block that we're in EH.
+ Builder.CreateStore(llvm::ConstantInt::getTrue(getLLVMContext()), ForEHVar);
- EmitBlock(FinallyBlock);
+ // Thread a jump through the finally cleanup.
+ EmitBranchThroughCleanup(RethrowDest);
- if (Info.SwitchBlock)
- EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- EmitBlock(Info.EndBlock);
+ Builder.restoreIP(SavedIP);
- // Branch around the rethrow code.
- EmitBranch(FinallyEnd);
+ EHCatchScope *CatchScope = EHStack.pushCatch(1);
+ CatchScope->setCatchAllHandler(0, CatchAllBB);
- EmitBlock(FinallyRethrow);
- // FIXME: Eventually we can chain the handlers together and just do a call
- // here.
- if (getInvokeDest()) {
- llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- Builder.CreateInvoke(getUnwindResumeOrRethrowFn(), Cont,
- getInvokeDest(),
- Builder.CreateLoad(RethrowPtr));
- EmitBlock(Cont);
- } else
- Builder.CreateCall(getUnwindResumeOrRethrowFn(),
- Builder.CreateLoad(RethrowPtr));
+ return Info;
+}
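
Stripped of the cleanup machinery, the control flow EnterFinallyBlock wires up (run the body, run the finally block, and rethrow only if the finally was reached on an exception edge) reduces to the sketch below; branches out of the finally body and the begin/end-catch bracketing are not modeled:

    #include <cstdio>

    template <class Body, class Finally>
    void runWithFinally(Body body, Finally finallyBlock) {
      bool forEH = false;            // the analogue of ForEHVar
      try {
        body();
      } catch (...) {
        forEH = true;                // the catch-all edge sets the flag...
        finallyBlock(forEH);
        throw;                       // ...and the finally ends by rethrowing
      }
      finallyBlock(forEH);           // normal edge: flag stays false
    }

    int main() {
      try {
        runWithFinally([] { throw 1; },
                       [](bool forEH) { std::printf("finally (forEH=%d)\n", forEH); });
      } catch (int) {
        std::puts("propagated past the finally block");
      }
    }
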
- Builder.CreateUnreachable();
+void CodeGenFunction::ExitFinallyBlock(FinallyInfo &Info) {
+ // Leave the finally catch-all.
+ EHCatchScope &Catch = cast<EHCatchScope>(*EHStack.begin());
+ llvm::BasicBlock *CatchAllBB = Catch.getHandler(0).Block;
+ EHStack.popCatch();
+
+ // And leave the normal cleanup.
+ PopCleanupBlock();
- EmitBlock(FinallyEnd);
-}
-
-CodeGenFunction::EHCleanupBlock::~EHCleanupBlock() {
- CGF.setInvokeDest(PreviousInvokeDest);
-
- llvm::BasicBlock *EndOfCleanup = CGF.Builder.GetInsertBlock();
-
- // Jump to the beginning of the cleanup.
- CGF.Builder.SetInsertPoint(CleanupHandler, CleanupHandler->begin());
-
- // The libstdc++ personality function.
- // TODO: generalize to work with other libraries.
- llvm::Constant *Personality = getPersonalityFn(CGF.CGM);
-
- // %exception = call i8* @llvm.eh.exception()
- // Magic intrinsic which tells gives us a handle to the caught
- // exception.
- llvm::Value *llvm_eh_exception =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
-
- llvm::Constant *Null = llvm::ConstantPointerNull::get(CGF.PtrToInt8Ty);
-
- // %ignored = call i32 @llvm.eh.selector(i8* %exception,
- // i8* @__gxx_personality_v0,
- // i8* null)
- // Magic intrinsic which tells LLVM that this invoke landing pad is
- // just a cleanup block.
- llvm::Value *Args[] = { Exc, Personality, Null };
- llvm::Value *llvm_eh_selector =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
- CGF.Builder.CreateCall(llvm_eh_selector, &Args[0], llvm::array_endof(Args));
-
- // And then we fall through into the code that the user put there.
- // Jump back to the end of the cleanup.
- CGF.Builder.SetInsertPoint(EndOfCleanup);
-
- // Rethrow the exception.
- if (CGF.getInvokeDest()) {
- llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
- CGF.Builder.CreateInvoke(CGF.getUnwindResumeOrRethrowFn(), Cont,
- CGF.getInvokeDest(), Exc);
- CGF.EmitBlock(Cont);
- } else
- CGF.Builder.CreateCall(CGF.getUnwindResumeOrRethrowFn(), Exc);
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+ EmitBlock(CatchAllBB, true);
+
+ Builder.restoreIP(SavedIP);
+}
+
+llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
+ if (TerminateLandingPad)
+ return TerminateLandingPad;
+
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ // This will get inserted at the end of the function.
+ TerminateLandingPad = createBasicBlock("terminate.lpad");
+ Builder.SetInsertPoint(TerminateLandingPad);
+
+ // Tell the backend that this is a landing pad.
+ llvm::CallInst *Exn =
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_exception), "exn");
+ Exn->setDoesNotThrow();
+
+ // Tell the backend what the exception table should be:
+ // nothing but a catch-all.
+ llvm::Value *Args[3] = { Exn, getPersonalityFn(*this),
+ getCatchAllValue(*this) };
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_selector),
+ Args, Args+3, "eh.selector")
+ ->setDoesNotThrow();
+
+ llvm::CallInst *TerminateCall = Builder.CreateCall(getTerminateFn(*this));
+ TerminateCall->setDoesNotReturn();
+ TerminateCall->setDoesNotThrow();
Builder.CreateUnreachable();
- // Resume inserting where we started, but put the new cleanup
- // handler in place.
- if (PreviousInsertionBlock)
- CGF.Builder.SetInsertPoint(PreviousInsertionBlock);
- else
- CGF.Builder.ClearInsertionPoint();
+ // Restore the saved insertion state.
+ Builder.restoreIP(SavedIP);
- if (CGF.Exceptions)
- CGF.setInvokeDest(CleanupHandler);
+ return TerminateLandingPad;
}
llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
if (TerminateHandler)
return TerminateHandler;
- // We don't want to change anything at the current location, so
- // save it aside and clear the insert point.
- llvm::BasicBlock *SavedInsertBlock = Builder.GetInsertBlock();
- llvm::BasicBlock::iterator SavedInsertPoint = Builder.GetInsertPoint();
- Builder.ClearInsertionPoint();
-
- llvm::Constant *Personality = getPersonalityFn(CGM);
- llvm::Value *llvm_eh_exception =
- CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *llvm_eh_selector =
- CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
- // Set up terminate handler
+ // Set up the terminate handler. This block is inserted at the very
+ // end of the function by FinishFunction.
TerminateHandler = createBasicBlock("terminate.handler");
- EmitBlock(TerminateHandler);
- llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
- // We are required to emit this call to satisfy LLVM, even
- // though we don't use the result.
- llvm::Value *Args[] = {
- Exc, Personality,
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1)
- };
- Builder.CreateCall(llvm_eh_selector, &Args[0], llvm::array_endof(Args));
- llvm::CallInst *TerminateCall =
- Builder.CreateCall(getTerminateFn(*this));
+ Builder.SetInsertPoint(TerminateHandler);
+ llvm::CallInst *TerminateCall = Builder.CreateCall(getTerminateFn(*this));
TerminateCall->setDoesNotReturn();
TerminateCall->setDoesNotThrow();
Builder.CreateUnreachable();
// Restore the saved insertion state.
- Builder.SetInsertPoint(SavedInsertBlock, SavedInsertPoint);
+ Builder.restoreIP(SavedIP);
return TerminateHandler;
}
+
+CodeGenFunction::CleanupBlock::CleanupBlock(CodeGenFunction &CGF,
+ CleanupKind Kind)
+ : CGF(CGF), SavedIP(CGF.Builder.saveIP()), NormalCleanupExitBB(0) {
+ llvm::BasicBlock *EntryBB = CGF.createBasicBlock("cleanup");
+ CGF.Builder.SetInsertPoint(EntryBB);
+
+ switch (Kind) {
+ case NormalAndEHCleanup:
+ NormalCleanupEntryBB = EHCleanupEntryBB = EntryBB;
+ break;
+
+ case NormalCleanup:
+ NormalCleanupEntryBB = EntryBB;
+ EHCleanupEntryBB = 0;
+ break;
+
+ case EHCleanup:
+ NormalCleanupEntryBB = 0;
+ EHCleanupEntryBB = EntryBB;
+ CGF.EHStack.pushTerminate();
+ break;
+ }
+}
+
+void CodeGenFunction::CleanupBlock::beginEHCleanup() {
+ assert(EHCleanupEntryBB == 0 && "already started an EH cleanup");
+ NormalCleanupExitBB = CGF.Builder.GetInsertBlock();
+ assert(NormalCleanupExitBB && "end of normal cleanup is unreachable");
+
+ EHCleanupEntryBB = CGF.createBasicBlock("eh.cleanup");
+ CGF.Builder.SetInsertPoint(EHCleanupEntryBB);
+ CGF.EHStack.pushTerminate();
+}
+
+CodeGenFunction::CleanupBlock::~CleanupBlock() {
+ llvm::BasicBlock *EHCleanupExitBB = 0;
+
+ // If we're currently writing the EH cleanup...
+ if (EHCleanupEntryBB) {
+ // Set the EH cleanup exit block.
+ EHCleanupExitBB = CGF.Builder.GetInsertBlock();
+ assert(EHCleanupExitBB && "end of EH cleanup is unreachable");
+
+ // If we're actually writing both at once, set the normal exit, too.
+ if (EHCleanupEntryBB == NormalCleanupEntryBB)
+ NormalCleanupExitBB = EHCleanupExitBB;
+
+ // Otherwise, we must have pushed a terminate handler.
+ else
+ CGF.EHStack.popTerminate();
+
+ // Otherwise, just set the normal cleanup exit block.
+ } else {
+ NormalCleanupExitBB = CGF.Builder.GetInsertBlock();
+ assert(NormalCleanupExitBB && "end of normal cleanup is unreachable");
+ }
+
+ CGF.EHStack.pushCleanup(NormalCleanupEntryBB, NormalCleanupExitBB,
+ EHCleanupEntryBB, EHCleanupExitBB);
+
+ CGF.Builder.restoreIP(SavedIP);
+}
+
+EHScopeStack::LazyCleanup::~LazyCleanup() {
+  llvm_unreachable("LazyCleanup is indestructible");
+}
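
The "indestructible" guard is deliberate: lazy cleanups are placement-allocated inside the scope stack's byte buffer, have their Emit hook run when the scope pops, and are then abandoned rather than destroyed. A toy model of that storage discipline:

    #include <cstdio>
    #include <new>

    struct ToyLazyCleanup {
      virtual void Emit(bool forEH) = 0;
    protected:
      ~ToyLazyCleanup() {}   // never called: popping just releases the bytes
    };

    struct PrintCleanup : ToyLazyCleanup {
      const char *Msg;
      explicit PrintCleanup(const char *M) : Msg(M) {}
      void Emit(bool forEH) override {
        std::printf("cleanup: %s (forEH=%d)\n", Msg, int(forEH));
      }
    };

    int main() {
      // Stand-in for the scope stack's buffer; the real stack bump-allocates
      // CleanupSize bytes right after the EHLazyCleanupScope header.
      alignas(PrintCleanup) char buffer[sizeof(PrintCleanup)];
      ToyLazyCleanup *c = new (buffer) PrintCleanup("release lock");
      c->Emit(/*forEH=*/false);  // popping the scope: run Emit, then drop the bytes
    }
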
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGException.h b/contrib/llvm/tools/clang/lib/CodeGen/CGException.h
new file mode 100644
index 0000000..80739cd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGException.h
@@ -0,0 +1,428 @@
+//===-- CGException.h - Classes for exceptions IR generation ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes support the generation of LLVM IR for exceptions in
+// C++ and Objective C.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGEXCEPTION_H
+#define CLANG_CODEGEN_CGEXCEPTION_H
+
+/// EHScopeStack is defined in CodeGenFunction.h, but its
+/// implementation is in this file and in CGException.cpp.
+#include "CodeGenFunction.h"
+
+namespace llvm {
+ class Value;
+ class BasicBlock;
+}
+
+namespace clang {
+namespace CodeGen {
+
+/// A protected scope for zero-cost EH handling.
+class EHScope {
+ llvm::BasicBlock *CachedLandingPad;
+
+ unsigned K : 3;
+
+protected:
+ enum { BitsRemaining = 29 };
+
+public:
+ enum Kind { Cleanup, LazyCleanup, Catch, Terminate, Filter };
+
+ EHScope(Kind K) : CachedLandingPad(0), K(K) {}
+
+ Kind getKind() const { return static_cast<Kind>(K); }
+
+ llvm::BasicBlock *getCachedLandingPad() const {
+ return CachedLandingPad;
+ }
+
+ void setCachedLandingPad(llvm::BasicBlock *Block) {
+ CachedLandingPad = Block;
+ }
+};
+
+/// A scope which attempts to handle some, possibly all, types of
+/// exceptions.
+///
+/// Objective C @finally blocks are represented using a cleanup scope
+/// after the catch scope.
+class EHCatchScope : public EHScope {
+ unsigned NumHandlers : BitsRemaining;
+
+ // In effect, we have a flexible array member
+ // Handler Handlers[0];
+ // But that's only standard in C99, not C++, so we have to do
+ // annoying pointer arithmetic instead.
+
+public:
+ struct Handler {
+ /// A type info value, or null (C++ null, not an LLVM null pointer)
+ /// for a catch-all.
+ llvm::Value *Type;
+
+ /// The catch handler for this type.
+ llvm::BasicBlock *Block;
+
+ static Handler make(llvm::Value *Type, llvm::BasicBlock *Block) {
+ Handler Temp;
+ Temp.Type = Type;
+ Temp.Block = Block;
+ return Temp;
+ }
+ };
+
+private:
+ Handler *getHandlers() {
+ return reinterpret_cast<Handler*>(this+1);
+ }
+
+ const Handler *getHandlers() const {
+ return reinterpret_cast<const Handler*>(this+1);
+ }
+
+public:
+ static size_t getSizeForNumHandlers(unsigned N) {
+ return sizeof(EHCatchScope) + N * sizeof(Handler);
+ }
+
+ EHCatchScope(unsigned NumHandlers)
+ : EHScope(Catch), NumHandlers(NumHandlers) {
+ }
+
+ unsigned getNumHandlers() const {
+ return NumHandlers;
+ }
+
+ void setCatchAllHandler(unsigned I, llvm::BasicBlock *Block) {
+ setHandler(I, /*catchall*/ 0, Block);
+ }
+
+ void setHandler(unsigned I, llvm::Value *Type, llvm::BasicBlock *Block) {
+ assert(I < getNumHandlers());
+ getHandlers()[I] = Handler::make(Type, Block);
+ }
+
+ const Handler &getHandler(unsigned I) const {
+ assert(I < getNumHandlers());
+ return getHandlers()[I];
+ }
+
+ typedef const Handler *iterator;
+ iterator begin() const { return getHandlers(); }
+ iterator end() const { return getHandlers() + getNumHandlers(); }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Catch;
+ }
+};
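
The this+1 arithmetic above is the usual C++ substitute for a C99 flexible array member. Here is the pattern in isolation; it is only safe while the trailing element type's alignment does not exceed the header's:

    #include <cassert>
    #include <cstddef>
    #include <cstdlib>
    #include <new>

    struct Header {
      unsigned NumElems;
      explicit Header(unsigned N) : NumElems(N) {}
      int *elems() { return reinterpret_cast<int *>(this + 1); }
      static std::size_t sizeFor(unsigned N) {
        return sizeof(Header) + N * sizeof(int);
      }
    };

    int main() {
      unsigned n = 3;
      void *mem = std::malloc(Header::sizeFor(n));  // one block: header + array
      Header *h = new (mem) Header(n);
      for (unsigned i = 0; i != n; ++i)
        h->elems()[i] = int(i * 10);
      assert(h->elems()[2] == 20);
      std::free(mem);
    }
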
+
+/// A cleanup scope which generates the cleanup blocks lazily.
+class EHLazyCleanupScope : public EHScope {
+ /// Whether this cleanup needs to be run along normal edges.
+ bool IsNormalCleanup : 1;
+
+ /// Whether this cleanup needs to be run along exception edges.
+ bool IsEHCleanup : 1;
+
+ /// The amount of extra storage needed by the LazyCleanup.
+ /// Always a multiple of the scope-stack alignment.
+ unsigned CleanupSize : 12;
+
+ /// The number of fixups required by enclosing scopes (not including
+ /// this one). If this is the top cleanup scope, all the fixups
+ /// from this index onwards belong to this scope.
+ unsigned FixupDepth : BitsRemaining - 14;
+
+ /// The nearest normal cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingNormal;
+
+ /// The nearest EH cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingEH;
+
+ /// The dual entry/exit block along the normal edge. This is lazily
+ /// created if needed before the cleanup is popped.
+ llvm::BasicBlock *NormalBlock;
+
+ /// The dual entry/exit block along the EH edge. This is lazily
+ /// created if needed before the cleanup is popped.
+ llvm::BasicBlock *EHBlock;
+
+public:
+ /// Gets the size required for a lazy cleanup scope with the given
+ /// cleanup-data requirements.
+ static size_t getSizeForCleanupSize(size_t Size) {
+ return sizeof(EHLazyCleanupScope) + Size;
+ }
+
+ size_t getAllocatedSize() const {
+ return sizeof(EHLazyCleanupScope) + CleanupSize;
+ }
+
+ EHLazyCleanupScope(bool IsNormal, bool IsEH, unsigned CleanupSize,
+ unsigned FixupDepth,
+ EHScopeStack::stable_iterator EnclosingNormal,
+ EHScopeStack::stable_iterator EnclosingEH)
+ : EHScope(EHScope::LazyCleanup),
+ IsNormalCleanup(IsNormal), IsEHCleanup(IsEH),
+ CleanupSize(CleanupSize), FixupDepth(FixupDepth),
+ EnclosingNormal(EnclosingNormal), EnclosingEH(EnclosingEH),
+ NormalBlock(0), EHBlock(0)
+ {}
+
+ bool isNormalCleanup() const { return IsNormalCleanup; }
+ llvm::BasicBlock *getNormalBlock() const { return NormalBlock; }
+ void setNormalBlock(llvm::BasicBlock *BB) { NormalBlock = BB; }
+
+ bool isEHCleanup() const { return IsEHCleanup; }
+ llvm::BasicBlock *getEHBlock() const { return EHBlock; }
+ void setEHBlock(llvm::BasicBlock *BB) { EHBlock = BB; }
+
+ unsigned getFixupDepth() const { return FixupDepth; }
+ EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
+ return EnclosingNormal;
+ }
+ EHScopeStack::stable_iterator getEnclosingEHCleanup() const {
+ return EnclosingEH;
+ }
+
+ size_t getCleanupSize() const { return CleanupSize; }
+ void *getCleanupBuffer() { return this + 1; }
+
+ EHScopeStack::LazyCleanup *getCleanup() {
+ return reinterpret_cast<EHScopeStack::LazyCleanup*>(getCleanupBuffer());
+ }
+
+ static bool classof(const EHScope *Scope) {
+ return (Scope->getKind() == LazyCleanup);
+ }
+};
+
+/// A scope which needs to execute some code if we try to unwind ---
+/// either normally, via the EH mechanism, or both --- through it.
+class EHCleanupScope : public EHScope {
+ /// The number of fixups required by enclosing scopes (not including
+ /// this one). If this is the top cleanup scope, all the fixups
+ /// from this index onwards belong to this scope.
+ unsigned FixupDepth : BitsRemaining;
+
+ /// The nearest normal cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingNormal;
+
+ /// The nearest EH cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingEH;
+
+ llvm::BasicBlock *NormalEntry;
+ llvm::BasicBlock *NormalExit;
+ llvm::BasicBlock *EHEntry;
+ llvm::BasicBlock *EHExit;
+
+public:
+ static size_t getSize() { return sizeof(EHCleanupScope); }
+
+ EHCleanupScope(unsigned FixupDepth,
+ EHScopeStack::stable_iterator EnclosingNormal,
+ EHScopeStack::stable_iterator EnclosingEH,
+ llvm::BasicBlock *NormalEntry, llvm::BasicBlock *NormalExit,
+ llvm::BasicBlock *EHEntry, llvm::BasicBlock *EHExit)
+ : EHScope(Cleanup), FixupDepth(FixupDepth),
+ EnclosingNormal(EnclosingNormal), EnclosingEH(EnclosingEH),
+ NormalEntry(NormalEntry), NormalExit(NormalExit),
+ EHEntry(EHEntry), EHExit(EHExit) {
+ assert((NormalEntry != 0) == (NormalExit != 0));
+ assert((EHEntry != 0) == (EHExit != 0));
+ }
+
+ bool isNormalCleanup() const { return NormalEntry != 0; }
+ bool isEHCleanup() const { return EHEntry != 0; }
+
+ llvm::BasicBlock *getNormalEntry() const { return NormalEntry; }
+ llvm::BasicBlock *getNormalExit() const { return NormalExit; }
+ llvm::BasicBlock *getEHEntry() const { return EHEntry; }
+ llvm::BasicBlock *getEHExit() const { return EHExit; }
+ unsigned getFixupDepth() const { return FixupDepth; }
+ EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
+ return EnclosingNormal;
+ }
+ EHScopeStack::stable_iterator getEnclosingEHCleanup() const {
+ return EnclosingEH;
+ }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Cleanup;
+ }
+};
+
+/// An exceptions scope which filters exceptions thrown through it.
+/// Only exceptions matching the filter types will be permitted to be
+/// thrown.
+///
+/// This is used to implement C++ exception specifications.
+class EHFilterScope : public EHScope {
+ unsigned NumFilters : BitsRemaining;
+
+ // Essentially ends in a flexible array member:
+ // llvm::Value *FilterTypes[0];
+
+ llvm::Value **getFilters() {
+ return reinterpret_cast<llvm::Value**>(this+1);
+ }
+
+ llvm::Value * const *getFilters() const {
+ return reinterpret_cast<llvm::Value* const *>(this+1);
+ }
+
+public:
+ EHFilterScope(unsigned NumFilters) :
+ EHScope(Filter), NumFilters(NumFilters) {}
+
+ static size_t getSizeForNumFilters(unsigned NumFilters) {
+ return sizeof(EHFilterScope) + NumFilters * sizeof(llvm::Value*);
+ }
+
+ unsigned getNumFilters() const { return NumFilters; }
+
+ void setFilter(unsigned I, llvm::Value *FilterValue) {
+ assert(I < getNumFilters());
+ getFilters()[I] = FilterValue;
+ }
+
+ llvm::Value *getFilter(unsigned I) const {
+ assert(I < getNumFilters());
+ return getFilters()[I];
+ }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Filter;
+ }
+};
+
+/// An exceptions scope which calls std::terminate if any exception
+/// reaches it.
+class EHTerminateScope : public EHScope {
+public:
+ EHTerminateScope() : EHScope(Terminate) {}
+ static size_t getSize() { return sizeof(EHTerminateScope); }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Terminate;
+ }
+};
+
+/// A non-stable pointer into the scope stack.
+class EHScopeStack::iterator {
+ char *Ptr;
+
+ friend class EHScopeStack;
+ explicit iterator(char *Ptr) : Ptr(Ptr) {}
+
+public:
+ iterator() : Ptr(0) {}
+
+ EHScope *get() const {
+ return reinterpret_cast<EHScope*>(Ptr);
+ }
+
+ EHScope *operator->() const { return get(); }
+ EHScope &operator*() const { return *get(); }
+
+ iterator &operator++() {
+ switch (get()->getKind()) {
+ case EHScope::Catch:
+ Ptr += EHCatchScope::getSizeForNumHandlers(
+ static_cast<const EHCatchScope*>(get())->getNumHandlers());
+ break;
+
+ case EHScope::Filter:
+ Ptr += EHFilterScope::getSizeForNumFilters(
+ static_cast<const EHFilterScope*>(get())->getNumFilters());
+ break;
+
+ case EHScope::LazyCleanup:
+ Ptr += static_cast<const EHLazyCleanupScope*>(get())
+ ->getAllocatedSize();
+ break;
+
+ case EHScope::Cleanup:
+ Ptr += EHCleanupScope::getSize();
+ break;
+
+ case EHScope::Terminate:
+ Ptr += EHTerminateScope::getSize();
+ break;
+ }
+
+ return *this;
+ }
+
+ iterator next() {
+ iterator copy = *this;
+ ++copy;
+ return copy;
+ }
+
+ iterator operator++(int) {
+ iterator copy = *this;
+ operator++();
+ return copy;
+ }
+
+ bool operator==(iterator other) const { return Ptr == other.Ptr; }
+ bool operator!=(iterator other) const { return Ptr != other.Ptr; }
+};
+
+inline EHScopeStack::iterator EHScopeStack::begin() const {
+ return iterator(StartOfData);
+}
+
+inline EHScopeStack::iterator EHScopeStack::end() const {
+ return iterator(EndOfBuffer);
+}
+
+inline void EHScopeStack::popCatch() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ assert(isa<EHCatchScope>(*begin()));
+ StartOfData += EHCatchScope::getSizeForNumHandlers(
+ cast<EHCatchScope>(*begin()).getNumHandlers());
+
+ assert(CatchDepth > 0 && "mismatched catch/terminate push/pop");
+ CatchDepth--;
+}
+
+inline void EHScopeStack::popTerminate() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ assert(isa<EHTerminateScope>(*begin()));
+ StartOfData += EHTerminateScope::getSize();
+
+ assert(CatchDepth > 0 && "mismatched catch/terminate push/pop");
+ CatchDepth--;
+}
+
+inline EHScopeStack::iterator EHScopeStack::find(stable_iterator sp) const {
+ assert(sp.isValid() && "finding invalid savepoint");
+ assert(sp.Size <= stable_begin().Size && "finding savepoint after pop");
+ return iterator(EndOfBuffer - sp.Size);
+}
+
+inline EHScopeStack::stable_iterator
+EHScopeStack::stabilize(iterator ir) const {
+ assert(StartOfData <= ir.Ptr && ir.Ptr <= EndOfBuffer);
+ return stable_iterator(EndOfBuffer - ir.Ptr);
+}
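
A stable_iterator records a distance from EndOfBuffer rather than a raw pointer: StartOfData moves as scopes push and pop (and the buffer itself may be reallocated), but the distance from the fixed logical end survives. A toy model:

    #include <cassert>
    #include <cstddef>

    int main() {
      char buffer[32];
      char *endOfBuffer = buffer + sizeof(buffer);
      char *startOfData = endOfBuffer;                   // empty stack

      startOfData -= 8;                                  // push an 8-byte scope
      std::ptrdiff_t stable = endOfBuffer - startOfData; // "stabilize"

      startOfData -= 12;                // a deeper push moves StartOfData...
      startOfData += 12;                // ...and the matching pop restores it

      assert(endOfBuffer - stable == startOfData);       // "find" recovers it
    }
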
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
index d67618b..43bab9f 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
@@ -19,7 +19,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/Intrinsics.h"
-#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;
@@ -44,8 +44,8 @@ void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
}
-llvm::Value *CodeGenFunction::CreateIRTemp(QualType Ty,
- const llvm::Twine &Name) {
+llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
+ const llvm::Twine &Name) {
llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
// FIXME: Should we prefer the preferred type alignment here?
CharUnits Align = getContext().getTypeAlignInChars(Ty);
@@ -53,8 +53,8 @@ llvm::Value *CodeGenFunction::CreateIRTemp(QualType Ty,
return Alloc;
}
-llvm::Value *CodeGenFunction::CreateMemTemp(QualType Ty,
- const llvm::Twine &Name) {
+llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
+ const llvm::Twine &Name) {
llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
// FIXME: Should we prefer the preferred type alignment here?
CharUnits Align = getContext().getTypeAlignInChars(Ty);
@@ -168,49 +168,62 @@ struct SubobjectAdjustment {
}
};
-RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
- bool IsInitializer) {
- bool ShouldDestroyTemporaries = false;
- unsigned OldNumLiveTemporaries = 0;
+static llvm::Value *
+CreateReferenceTemporary(CodeGenFunction& CGF, QualType Type,
+ const NamedDecl *InitializedDecl) {
+ if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
+ if (VD->hasGlobalStorage()) {
+ llvm::SmallString<256> Name;
+ CGF.CGM.getMangleContext().mangleReferenceTemporary(VD, Name);
+
+ const llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);
+
+ // Create the reference temporary.
+ llvm::GlobalValue *RefTemp =
+ new llvm::GlobalVariable(CGF.CGM.getModule(),
+ RefTempTy, /*isConstant=*/false,
+ llvm::GlobalValue::InternalLinkage,
+ llvm::Constant::getNullValue(RefTempTy),
+ Name.str());
+ return RefTemp;
+ }
+ }
+
+ return CGF.CreateMemTemp(Type, "ref.tmp");
+}
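
CreateReferenceTemporary distinguishes the two storage durations a bound temporary may need: a mangled internal-linkage global when the reference itself has global storage, and an ordinary stack slot otherwise. The corresponding source-level lifetimes:

    #include <cstdio>

    struct Noisy {
      const char *Name;
      ~Noisy() { std::printf("~Noisy(%s)\n", Name); }
    };

    // Bound to a global-storage reference: needs global backing storage and
    // lives until static destruction.
    const Noisy &atGlobalScope = Noisy{"global"};

    int main() {
      // Bound to a local reference: a stack slot ("ref.tmp") suffices, and the
      // temporary is destroyed when the reference goes out of scope.
      const Noisy &atLocalScope = Noisy{"local"};
      std::puts("end of main");
      // Output: end of main, ~Noisy(local), then ~Noisy(global) at exit.
    }
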
+static llvm::Value *
+EmitExprForReferenceBinding(CodeGenFunction& CGF, const Expr* E,
+ llvm::Value *&ReferenceTemporary,
+ const CXXDestructorDecl *&ReferenceTemporaryDtor,
+ const NamedDecl *InitializedDecl) {
if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
E = DAE->getExpr();
-
+
if (const CXXExprWithTemporaries *TE = dyn_cast<CXXExprWithTemporaries>(E)) {
- ShouldDestroyTemporaries = true;
-
- // Keep track of the current cleanup stack depth.
- OldNumLiveTemporaries = LiveTemporaries.size();
-
- E = TE->getSubExpr();
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+
+ return EmitExprForReferenceBinding(CGF, TE->getSubExpr(),
+ ReferenceTemporary,
+ ReferenceTemporaryDtor,
+ InitializedDecl);
}
-
- RValue Val;
- if (E->isLvalue(getContext()) == Expr::LV_Valid) {
- // Emit the expr as an lvalue.
- LValue LV = EmitLValue(E);
- if (LV.isSimple()) {
- if (ShouldDestroyTemporaries) {
- // Pop temporaries.
- while (LiveTemporaries.size() > OldNumLiveTemporaries)
- PopCXXTemporary();
- }
-
- return RValue::get(LV.getAddress());
- }
-
- Val = EmitLoadOfLValue(LV, E->getType());
+
+ RValue RV;
+ if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid) {
+ // Emit the expression as an lvalue.
+ LValue LV = CGF.EmitLValue(E);
+
+ if (LV.isSimple())
+ return LV.getAddress();
- if (ShouldDestroyTemporaries) {
- // Pop temporaries.
- while (LiveTemporaries.size() > OldNumLiveTemporaries)
- PopCXXTemporary();
- }
+ // We have to load the lvalue.
+ RV = CGF.EmitLoadOfLValue(LV, E->getType());
} else {
QualType ResultTy = E->getType();
-
+
llvm::SmallVector<SubobjectAdjustment, 2> Adjustments;
- do {
+ while (true) {
if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
E = PE->getSubExpr();
continue;
@@ -233,7 +246,7 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
continue;
}
} else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
- if (ME->getBase()->isLvalue(getContext()) != Expr::LV_Valid &&
+ if (ME->getBase()->isLvalue(CGF.getContext()) != Expr::LV_Valid &&
ME->getBase()->getType()->isRecordType()) {
if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
E = ME->getBase();
@@ -246,63 +259,46 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
// Nothing changed.
break;
- } while (true);
-
- Val = EmitAnyExprToTemp(E, /*IsAggLocVolatile=*/false,
- IsInitializer);
-
- if (ShouldDestroyTemporaries) {
- // Pop temporaries.
- while (LiveTemporaries.size() > OldNumLiveTemporaries)
- PopCXXTemporary();
- }
+ }
- if (IsInitializer) {
- // We might have to destroy the temporary variable.
+ // Create a reference temporary if necessary.
+ if (CGF.hasAggregateLLVMType(E->getType()) &&
+ !E->getType()->isAnyComplexType())
+ ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
+ InitializedDecl);
+
+ RV = CGF.EmitAnyExpr(E, ReferenceTemporary, /*IsAggLocVolatile=*/false,
+ /*IgnoreResult=*/false, InitializedDecl);
+
+ if (InitializedDecl) {
+ // Get the destructor for the reference temporary.
if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
- if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
- if (!ClassDecl->hasTrivialDestructor()) {
- const CXXDestructorDecl *Dtor =
- ClassDecl->getDestructor(getContext());
-
- {
- DelayedCleanupBlock Scope(*this);
- EmitCXXDestructorCall(Dtor, Dtor_Complete,
- /*ForVirtualBase=*/false,
- Val.getAggregateAddr());
-
- // Make sure to jump to the exit block.
- EmitBranch(Scope.getCleanupExitBlock());
- }
- if (Exceptions) {
- EHCleanupBlock Cleanup(*this);
- EmitCXXDestructorCall(Dtor, Dtor_Complete,
- /*ForVirtualBase=*/false,
- Val.getAggregateAddr());
- }
- }
- }
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (!ClassDecl->hasTrivialDestructor())
+ ReferenceTemporaryDtor = ClassDecl->getDestructor();
}
}
-
+
   // Check if we need to perform derived-to-base casts and/or field accesses, to
// get from the temporary object we created (and, potentially, for which we
// extended the lifetime) to the subobject we're binding the reference to.
if (!Adjustments.empty()) {
- llvm::Value *Object = Val.getAggregateAddr();
+ llvm::Value *Object = RV.getAggregateAddr();
for (unsigned I = Adjustments.size(); I != 0; --I) {
SubobjectAdjustment &Adjustment = Adjustments[I-1];
switch (Adjustment.Kind) {
case SubobjectAdjustment::DerivedToBaseAdjustment:
- Object = GetAddressOfBaseClass(Object,
- Adjustment.DerivedToBase.DerivedClass,
- *Adjustment.DerivedToBase.BasePath,
- /*NullCheckValue=*/false);
+ Object =
+ CGF.GetAddressOfBaseClass(Object,
+ Adjustment.DerivedToBase.DerivedClass,
+ *Adjustment.DerivedToBase.BasePath,
+ /*NullCheckValue=*/false);
break;
case SubobjectAdjustment::FieldAdjustment: {
unsigned CVR = Adjustment.Field.CVRQualifiers;
- LValue LV = EmitLValueForField(Object, Adjustment.Field.Field, CVR);
+ LValue LV =
+ CGF.EmitLValueForField(Object, Adjustment.Field.Field, CVR);
if (LV.isSimple()) {
Object = LV.getAddress();
break;
@@ -312,36 +308,72 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
// the object we're binding to.
QualType T = Adjustment.Field.Field->getType().getNonReferenceType()
.getUnqualifiedType();
- Object = CreateTempAlloca(ConvertType(T), "lv");
- EmitStoreThroughLValue(EmitLoadOfLValue(LV, T),
- LValue::MakeAddr(Object,
- Qualifiers::fromCVRMask(CVR)),
- T);
+ Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
+ LValue TempLV = LValue::MakeAddr(Object,
+ Qualifiers::fromCVRMask(CVR));
+ CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV, T), TempLV, T);
break;
}
+
}
}
- const llvm::Type *ResultPtrTy
- = llvm::PointerType::get(ConvertType(ResultTy), 0);
- Object = Builder.CreateBitCast(Object, ResultPtrTy, "temp");
- return RValue::get(Object);
+ const llvm::Type *ResultPtrTy = CGF.ConvertType(ResultTy)->getPointerTo();
+ return CGF.Builder.CreateBitCast(Object, ResultPtrTy, "temp");
}
}
- if (Val.isAggregate()) {
- Val = RValue::get(Val.getAggregateAddr());
- } else {
- // Create a temporary variable that we can bind the reference to.
- llvm::Value *Temp = CreateMemTemp(E->getType(), "reftmp");
- if (Val.isScalar())
- EmitStoreOfScalar(Val.getScalarVal(), Temp, false, E->getType());
- else
- StoreComplexToAddr(Val.getComplexVal(), Temp, false);
- Val = RValue::get(Temp);
+ if (RV.isAggregate())
+ return RV.getAggregateAddr();
+
+ // Create a temporary variable that we can bind the reference to.
+ ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
+ InitializedDecl);
+
+ if (RV.isScalar())
+ CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
+ /*Volatile=*/false, E->getType());
+ else
+ CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
+ /*Volatile=*/false);
+ return ReferenceTemporary;
+}
+
+RValue
+CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
+ const NamedDecl *InitializedDecl) {
+ llvm::Value *ReferenceTemporary = 0;
+ const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
+ llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
+ ReferenceTemporaryDtor,
+ InitializedDecl);
+
+ if (!ReferenceTemporaryDtor)
+ return RValue::get(Value);
+
+ // Make sure to call the destructor for the reference temporary.
+ if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
+ if (VD->hasGlobalStorage()) {
+ llvm::Constant *DtorFn =
+ CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
+      EmitCXXGlobalDtorRegistration(DtorFn,
+                                    cast<llvm::Constant>(ReferenceTemporary));
+
+ return RValue::get(Value);
+ }
+ }
+
+ CleanupBlock Cleanup(*this, NormalCleanup);
+ EmitCXXDestructorCall(ReferenceTemporaryDtor, Dtor_Complete,
+ /*ForVirtualBase=*/false, ReferenceTemporary);
+
+ if (Exceptions) {
+ Cleanup.beginEHCleanup();
+ EmitCXXDestructorCall(ReferenceTemporaryDtor, Dtor_Complete,
+ /*ForVirtualBase=*/false, ReferenceTemporary);
}
- return Val;
+ return RValue::get(Value);
}
@@ -359,118 +391,28 @@ void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
if (!CatchUndefined)
return;
- const llvm::Type *Size_tTy
- = llvm::IntegerType::get(VMContext, LLVMPointerWidth);
Address = Builder.CreateBitCast(Address, PtrToInt8Ty);
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, &Size_tTy, 1);
- const llvm::IntegerType *Int1Ty = llvm::IntegerType::get(VMContext, 1);
+ const llvm::Type *IntPtrT = IntPtrTy;
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, &IntPtrT, 1);
+ const llvm::IntegerType *Int1Ty = llvm::Type::getInt1Ty(VMContext);
// In time, people may want to control this and use a 1 here.
llvm::Value *Arg = llvm::ConstantInt::get(Int1Ty, 0);
llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
llvm::BasicBlock *Cont = createBasicBlock();
llvm::BasicBlock *Check = createBasicBlock();
- llvm::Value *NegativeOne = llvm::ConstantInt::get(Size_tTy, -1ULL);
+ llvm::Value *NegativeOne = llvm::ConstantInt::get(IntPtrTy, -1ULL);
Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);
EmitBlock(Check);
Builder.CreateCondBr(Builder.CreateICmpUGE(C,
- llvm::ConstantInt::get(Size_tTy, Size)),
+ llvm::ConstantInt::get(IntPtrTy, Size)),
Cont, getTrapBB());
EmitBlock(Cont);
}
-llvm::Value *CodeGenFunction::
-EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
- bool isInc, bool isPre) {
- QualType ValTy = E->getSubExpr()->getType();
- llvm::Value *InVal = EmitLoadOfLValue(LV, ValTy).getScalarVal();
-
- int AmountVal = isInc ? 1 : -1;
-
- if (ValTy->isPointerType() &&
- ValTy->getAs<PointerType>()->isVariableArrayType()) {
- // The amount of the addition/subtraction needs to account for the VLA size
- ErrorUnsupported(E, "VLA pointer inc/dec");
- }
-
- llvm::Value *NextVal;
- if (const llvm::PointerType *PT =
- dyn_cast<llvm::PointerType>(InVal->getType())) {
- llvm::Constant *Inc =
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), AmountVal);
- if (!isa<llvm::FunctionType>(PT->getElementType())) {
- QualType PTEE = ValTy->getPointeeType();
- if (const ObjCObjectType *OIT = PTEE->getAs<ObjCObjectType>()) {
- // Handle interface types, which are not represented with a concrete
- // type.
- int size = getContext().getTypeSize(OIT) / 8;
- if (!isInc)
- size = -size;
- Inc = llvm::ConstantInt::get(Inc->getType(), size);
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- InVal = Builder.CreateBitCast(InVal, i8Ty);
- NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr");
- llvm::Value *lhs = LV.getAddress();
- lhs = Builder.CreateBitCast(lhs, llvm::PointerType::getUnqual(i8Ty));
- LV = LValue::MakeAddr(lhs, MakeQualifiers(ValTy));
- } else
- NextVal = Builder.CreateInBoundsGEP(InVal, Inc, "ptrincdec");
- } else {
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
- NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
- NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
- }
- } else if (InVal->getType() == llvm::Type::getInt1Ty(VMContext) && isInc) {
- // Bool++ is an interesting case, due to promotion rules, we get:
- // Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
- // Bool = ((int)Bool+1) != 0
- // An interesting aspect of this is that increment is always true.
- // Decrement does not have this property.
- NextVal = llvm::ConstantInt::getTrue(VMContext);
- } else if (isa<llvm::IntegerType>(InVal->getType())) {
- NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);
-
- // Signed integer overflow is undefined behavior.
- if (ValTy->isSignedIntegerType())
- NextVal = Builder.CreateNSWAdd(InVal, NextVal, isInc ? "inc" : "dec");
- else
- NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
- } else {
- // Add the inc/dec to the real part.
- if (InVal->getType()->isFloatTy())
- NextVal =
- llvm::ConstantFP::get(VMContext,
- llvm::APFloat(static_cast<float>(AmountVal)));
- else if (InVal->getType()->isDoubleTy())
- NextVal =
- llvm::ConstantFP::get(VMContext,
- llvm::APFloat(static_cast<double>(AmountVal)));
- else {
- llvm::APFloat F(static_cast<float>(AmountVal));
- bool ignored;
- F.convert(Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
- &ignored);
- NextVal = llvm::ConstantFP::get(VMContext, F);
- }
- NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec");
- }
-
- // Store the updated result through the lvalue.
- if (LV.isBitField())
- EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy, &NextVal);
- else
- EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy);
-
- // If this is a postinc, return the value read from memory, otherwise use the
- // updated value.
- return isPre ? NextVal : InVal;
-}
-
-
CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre) {
@@ -568,6 +510,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
switch (E->getStmtClass()) {
default: return EmitUnsupportedLValue(E, "l-value expression");
+ case Expr::ObjCSelectorExprClass:
+ return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
case Expr::ObjCIsaExprClass:
return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
case Expr::BinaryOperatorClass:
@@ -600,8 +544,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
case Expr::CXXExprWithTemporariesClass:
return EmitCXXExprWithTemporariesLValue(cast<CXXExprWithTemporaries>(E));
- case Expr::CXXZeroInitValueExprClass:
- return EmitNullInitializationLValue(cast<CXXZeroInitValueExpr>(E));
+ case Expr::CXXScalarValueInitExprClass:
+ return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
case Expr::CXXDefaultArgExprClass:
return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
case Expr::CXXTypeidExprClass:
@@ -816,8 +760,7 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
const VectorType *ExprVT = ExprType->getAs<VectorType>();
if (!ExprVT) {
unsigned InIdx = getAccessedFieldNo(0, Elts);
- llvm::Value *Elt = llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), InIdx);
+ llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp"));
}
@@ -827,8 +770,7 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
llvm::SmallVector<llvm::Constant*, 4> Mask;
for (unsigned i = 0; i != NumResultElts; ++i) {
unsigned InIdx = getAccessedFieldNo(i, Elts);
- Mask.push_back(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), InIdx));
+ Mask.push_back(llvm::ConstantInt::get(Int32Ty, InIdx));
}
llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
@@ -1044,8 +986,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
llvm::SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
for (unsigned i = 0; i != NumSrcElts; ++i) {
unsigned InIdx = getAccessedFieldNo(i, Elts);
- Mask[InIdx] = llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), i);
+ Mask[InIdx] = llvm::ConstantInt::get(Int32Ty, i);
}
llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
@@ -1058,7 +999,6 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
// FIXME: since we're shuffling with undef, can we just use the indices
// into that? This could be simpler.
llvm::SmallVector<llvm::Constant*, 4> ExtMask;
- const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
unsigned i;
for (i = 0; i != NumSrcElts; ++i)
ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i));
@@ -1089,7 +1029,6 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
} else {
// If the Src is a scalar (not a vector) it must be updating one element.
unsigned InIdx = getAccessedFieldNo(0, Elts);
- const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp");
}
@@ -1401,6 +1340,22 @@ llvm::BasicBlock *CodeGenFunction::getTrapBB() {
return TrapBB;
}
+/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
+/// array to pointer, return the array subexpression.
+static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
+ // If this isn't just an array->pointer decay, bail out.
+ const CastExpr *CE = dyn_cast<CastExpr>(E);
+ if (CE == 0 || CE->getCastKind() != CastExpr::CK_ArrayToPointerDecay)
+ return 0;
+
+  // If this is a decay from a variable-width array, bail out.
+ const Expr *SubExpr = CE->getSubExpr();
+ if (SubExpr->getType()->isVariableArrayType())
+ return 0;
+
+ return SubExpr;
+}
+
LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// The index must always be an integer, which is not an aggregate. Emit it.
llvm::Value *Idx = EmitScalarExpr(E->getIdx());
@@ -1413,25 +1368,19 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// Emit the vector as an lvalue to get its address.
LValue LHS = EmitLValue(E->getBase());
assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
- Idx = Builder.CreateIntCast(Idx,
- llvm::Type::getInt32Ty(VMContext), IdxSigned, "vidx");
+    Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
return LValue::MakeVectorElt(LHS.getAddress(), Idx,
E->getBase()->getType().getCVRQualifiers());
}
- // The base must be a pointer, which is not an aggregate. Emit it.
- llvm::Value *Base = EmitScalarExpr(E->getBase());
-
  // Extend or truncate the index type to 32 or 64 bits.
- unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
- if (IdxBitwidth != LLVMPointerWidth)
- Idx = Builder.CreateIntCast(Idx,
- llvm::IntegerType::get(VMContext, LLVMPointerWidth),
+ if (!Idx->getType()->isIntegerTy(LLVMPointerWidth))
+ Idx = Builder.CreateIntCast(Idx, IntPtrTy,
IdxSigned, "idxprom");
-
+
// FIXME: As llvm implements the object size checking, this can come out.
if (CatchUndefined) {
- if (const ImplicitCastExpr *ICE=dyn_cast<ImplicitCastExpr>(E->getBase())) {
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())){
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
if (ICE->getCastKind() == CastExpr::CK_ArrayToPointerDecay) {
if (const ConstantArrayType *CAT
@@ -1463,9 +1412,13 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
Idx = Builder.CreateUDiv(Idx,
llvm::ConstantInt::get(Idx->getType(),
BaseTypeSize.getQuantity()));
+
+ // The base must be a pointer, which is not an aggregate. Emit it.
+ llvm::Value *Base = EmitScalarExpr(E->getBase());
+
Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
- } else if (const ObjCObjectType *OIT =
- E->getType()->getAs<ObjCObjectType>()) {
+ } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
+ // Indexing over an interface, as in "NSString *P; P[4];"
llvm::Value *InterfaceSize =
llvm::ConstantInt::get(Idx->getType(),
getContext().getTypeSizeInChars(OIT).getQuantity());
@@ -1473,10 +1426,27 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
Idx = Builder.CreateMul(Idx, InterfaceSize);
const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
+
+ // The base must be a pointer, which is not an aggregate. Emit it.
+ llvm::Value *Base = EmitScalarExpr(E->getBase());
Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy),
Idx, "arrayidx");
Address = Builder.CreateBitCast(Address, Base->getType());
+ } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
+ // If this is A[i] where A is an array, the frontend will have decayed the
+  // base to be an ArrayToPointerDecay implicit cast. While correct, it is
+ // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
+ // "gep x, i" here. Emit one "gep A, 0, i".
+ assert(Array->getType()->isArrayType() &&
+ "Array to pointer decay must have array source type!");
+ llvm::Value *ArrayPtr = EmitLValue(Array).getAddress();
+ llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
+ llvm::Value *Args[] = { Zero, Idx };
+
+ Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, Args+2, "arrayidx");
} else {
+ // The base must be a pointer, which is not an aggregate. Emit it.
+ llvm::Value *Base = EmitScalarExpr(E->getBase());
Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
}
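The isSimpleArrayDecayOperand fast path is purely a -O0 code-size and compile-time win: subscripting an array directly now emits one two-index GEP instead of a decay "gep A, 0, 0" followed by a pointer "gep tmp, i". A hypothetical example it covers:

    int A[16];
    int load(unsigned i) {
      return A[i];  // a single "gep @A, 0, i", even at -O0
    }

VLAs are excluded up front because their decay is not a simple constant GEP.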
@@ -1501,17 +1471,15 @@ llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
llvm::SmallVector<unsigned, 4> &Elts) {
llvm::SmallVector<llvm::Constant*, 4> CElts;
+ const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
for (unsigned i = 0, e = Elts.size(); i != e; ++i)
- CElts.push_back(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), Elts[i]));
+ CElts.push_back(llvm::ConstantInt::get(Int32Ty, Elts[i]));
return llvm::ConstantVector::get(&CElts[0], CElts.size());
}
LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
- const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
-
// Emit the base vector as an l-value.
LValue Base;
@@ -1816,10 +1784,18 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
cast<CXXRecordDecl>(DerivedClassTy->getDecl());
LValue LV = EmitLValue(E->getSubExpr());
+ llvm::Value *This;
+ if (LV.isPropertyRef()) {
+ RValue RV = EmitLoadOfPropertyRefLValue(LV, E->getSubExpr()->getType());
+    assert(!RV.isScalar() && "EmitCastLValue");
+ This = RV.getAggregateAddr();
+  } else
+ This = LV.getAddress();
// Perform the derived-to-base conversion
llvm::Value *Base =
- GetAddressOfBaseClass(LV.getAddress(), DerivedClassDecl,
+ GetAddressOfBaseClass(This, DerivedClassDecl,
E->getBasePath(), /*NullCheckValue=*/false);
return LValue::MakeAddr(Base, MakeQualifiers(E->getType()));
@@ -1840,7 +1816,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
return LValue::MakeAddr(Derived, MakeQualifiers(E->getType()));
}
- case CastExpr::CK_BitCast: {
+ case CastExpr::CK_LValueBitCast: {
// This must be a reinterpret_cast (or c-style equivalent).
const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);
@@ -1853,7 +1829,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
}
LValue CodeGenFunction::EmitNullInitializationLValue(
- const CXXZeroInitValueExpr *E) {
+ const CXXScalarValueInitExpr *E) {
QualType Ty = E->getType();
LValue LV = LValue::MakeAddr(CreateMemTemp(Ty), MakeQualifiers(Ty));
EmitNullInitialization(LV.getAddress(), Ty);
@@ -1966,15 +1942,28 @@ CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
LValue LV = EmitLValue(E->getSubExpr());
- PushCXXTemporary(E->getTemporary(), LV.getAddress());
+ EmitCXXTemporary(E->getTemporary(), LV.getAddress());
return LV;
}
LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
- // Can only get l-value for message expression returning aggregate type
RValue RV = EmitObjCMessageExpr(E);
- // FIXME: can this be volatile?
- return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType()));
+
+ if (!RV.isScalar())
+ return LValue::MakeAddr(RV.getAggregateAddr(),
+ MakeQualifiers(E->getType()));
+
+ assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
+ "Can't have a scalar return unless the return type is a "
+ "reference type!");
+
+ return LValue::MakeAddr(RV.getScalarVal(), MakeQualifiers(E->getType()));
+}
+
+LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
+ llvm::Value *V =
+ CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true);
+ return LValue::MakeAddr(V, MakeQualifiers(E->getType()));
}
llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
index a4e64fb..219a5f9 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
@@ -127,7 +127,7 @@ public:
void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
void VisitCXXConstructExpr(const CXXConstructExpr *E);
void VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E);
- void VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E);
+ void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
void VisitVAArgExpr(VAArgExpr *E);
@@ -177,11 +177,16 @@ bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
/// directly into the return value slot. If GC does interfere, a final
/// move will be performed.
void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) {
- if (!RequiresGCollection) return;
-
- CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, DestPtr,
+ if (RequiresGCollection) {
+ std::pair<uint64_t, unsigned> TypeInfo =
+ CGF.getContext().getTypeInfo(E->getType());
+ unsigned long size = TypeInfo.first/8;
+ const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
+ llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
+ CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, DestPtr,
Src.getAggregateAddr(),
- E->getType());
+ SizeVal);
+ }
}
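EmitGCMemmoveCollectable now takes an explicit byte count instead of a QualType, so each call site computes the size itself from the AST type info. That computation in isolation (a sketch; the helper name is hypothetical, and getTypeInfo returns a {bits, align} pair as the API stood in this tree):

    #include "clang/AST/ASTContext.h"
    uint64_t sizeInBytes(clang::ASTContext &Ctx, clang::QualType Ty) {
      std::pair<uint64_t, unsigned> Info = Ctx.getTypeInfo(Ty);
      return Info.first / 8;  // first is the size in bits
    }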
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
@@ -198,9 +203,14 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
}
if (RequiresGCollection) {
+ std::pair<uint64_t, unsigned> TypeInfo =
+ CGF.getContext().getTypeInfo(E->getType());
+ unsigned long size = TypeInfo.first/8;
+ const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
+ llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
DestPtr, Src.getAggregateAddr(),
- E->getType());
+ SizeVal);
return;
}
// If the result of the assignment is used, copy the LHS there also.
@@ -297,6 +307,10 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
break;
}
+ case CastExpr::CK_LValueBitCast:
+ llvm_unreachable("there are no lvalue bit-casts on aggregates");
+ break;
+
case CastExpr::CK_BitCast: {
// This must be a member function pointer cast.
Visit(E->getSubExpr());
@@ -396,35 +410,11 @@ void AggExprEmitter::VisitUnaryAddrOf(const UnaryOperator *E) {
const llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
-
llvm::Value *DstPtr = Builder.CreateStructGEP(DestPtr, 0, "dst.ptr");
- llvm::Value *FuncPtr;
-
- if (MD->isVirtual()) {
- int64_t Index = CGF.CGM.getVTables().getMethodVTableIndex(MD);
-
- // FIXME: We shouldn't use / 8 here.
- uint64_t PointerWidthInBytes =
- CGF.CGM.getContext().Target.getPointerWidth(0) / 8;
-
- // Itanium C++ ABI 2.3:
- // For a non-virtual function, this field is a simple function pointer.
- // For a virtual function, it is 1 plus the virtual table offset
- // (in bytes) of the function, represented as a ptrdiff_t.
- FuncPtr = llvm::ConstantInt::get(PtrDiffTy,
- (Index * PointerWidthInBytes) + 1);
- } else {
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- const llvm::Type *Ty =
- CGF.CGM.getTypes().GetFunctionType(CGF.CGM.getTypes().getFunctionInfo(MD),
- FPT->isVariadic());
- llvm::Constant *Fn = CGF.CGM.GetAddrOfFunction(MD, Ty);
- FuncPtr = llvm::ConstantExpr::getPtrToInt(Fn, PtrDiffTy);
- }
+ llvm::Value *FuncPtr = CGF.CGM.GetCXXMemberFunctionPointerValue(MD);
Builder.CreateStore(FuncPtr, DstPtr, VolatileDest);
llvm::Value *AdjPtr = Builder.CreateStructGEP(DestPtr, 1, "dst.adj");
-
// The adjustment will always be 0.
Builder.CreateStore(llvm::ConstantInt::get(PtrDiffTy, 0), AdjPtr,
VolatileDest);
@@ -546,17 +536,15 @@ void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
// Don't make this a live temporary if we're emitting an initializer expr.
if (!IsInitializer)
- CGF.PushCXXTemporary(E->getTemporary(), Val);
+ CGF.EmitCXXTemporary(E->getTemporary(), Val);
}
void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
llvm::Value *Val = DestPtr;
- if (!Val) {
- // Create a temporary variable.
+ if (!Val) // Create a temporary variable.
Val = CGF.CreateMemTemp(E->getType(), "tmp");
- }
if (E->requiresZeroInitialization())
EmitNullInitializationToLValue(LValue::MakeAddr(Val,
@@ -573,7 +561,7 @@ void AggExprEmitter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
CGF.EmitCXXExprWithTemporaries(E, Val, VolatileDest, IsInitializer);
}
-void AggExprEmitter::VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) {
+void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
llvm::Value *Val = DestPtr;
if (!Val) {
@@ -602,7 +590,7 @@ AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV, QualType T) {
if (isa<ImplicitValueInitExpr>(E)) {
EmitNullInitializationToLValue(LV, T);
} else if (T->isReferenceType()) {
- RValue RV = CGF.EmitReferenceBindingToExpr(E, /*IsInitializer=*/false);
+ RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
CGF.EmitStoreThroughLValue(RV, LV, T);
} else if (T->isAnyComplexType()) {
CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
@@ -822,18 +810,11 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
// equal, but other compilers do this optimization, and almost every memcpy
// implementation handles this case safely. If there is a libc that does not
// safely handle this, we can add a target hook.
- const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
- if (DestPtr->getType() != BP)
- DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
- if (SrcPtr->getType() != BP)
- SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
// Get size and alignment info for this aggregate.
std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
// FIXME: Handle variable sized types.
- const llvm::Type *IntPtr =
- llvm::IntegerType::get(VMContext, LLVMPointerWidth);
// FIXME: If we have a volatile struct, the optimizer can remove what might
// appear to be `extra' memory ops:
@@ -847,25 +828,46 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
//
// we need to use a different call here. We use isVolatile to indicate when
// either the source or the destination is volatile.
- const llvm::Type *I1Ty = llvm::Type::getInt1Ty(VMContext);
- const llvm::Type *I8Ty = llvm::Type::getInt8Ty(VMContext);
- const llvm::Type *I32Ty = llvm::Type::getInt32Ty(VMContext);
const llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
- const llvm::Type *DBP = llvm::PointerType::get(I8Ty, DPT->getAddressSpace());
- if (DestPtr->getType() != DBP)
- DestPtr = Builder.CreateBitCast(DestPtr, DBP, "tmp");
+ const llvm::Type *DBP =
+ llvm::Type::getInt8PtrTy(VMContext, DPT->getAddressSpace());
+ DestPtr = Builder.CreateBitCast(DestPtr, DBP, "tmp");
const llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
- const llvm::Type *SBP = llvm::PointerType::get(I8Ty, SPT->getAddressSpace());
- if (SrcPtr->getType() != SBP)
- SrcPtr = Builder.CreateBitCast(SrcPtr, SBP, "tmp");
-
+ const llvm::Type *SBP =
+ llvm::Type::getInt8PtrTy(VMContext, SPT->getAddressSpace());
+ SrcPtr = Builder.CreateBitCast(SrcPtr, SBP, "tmp");
+
+ if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
+ RecordDecl *Record = RecordTy->getDecl();
+ if (Record->hasObjectMember()) {
+ unsigned long size = TypeInfo.first/8;
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
+ SizeVal);
+ return;
+ }
+ } else if (getContext().getAsArrayType(Ty)) {
+ QualType BaseType = getContext().getBaseElementType(Ty);
+ if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
+ if (RecordTy->getDecl()->hasObjectMember()) {
+ unsigned long size = TypeInfo.first/8;
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
+ SizeVal);
+ return;
+ }
+ }
+ }
+
Builder.CreateCall5(CGM.getMemCpyFn(DestPtr->getType(), SrcPtr->getType(),
- IntPtr),
+ IntPtrTy),
DestPtr, SrcPtr,
// TypeInfo.first describes size in bits.
- llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
- llvm::ConstantInt::get(I32Ty, TypeInfo.second/8),
- llvm::ConstantInt::get(I1Ty, isVolatile));
+ llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8),
+ Builder.getInt32(TypeInfo.second/8),
+ Builder.getInt1(isVolatile));
}
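EmitAggregateCopy now owns the Objective-C GC decision: if the record being copied (or the element type, for arrays) has a __strong object member, the copy is routed through the runtime's collectable memmove so the collector observes the pointer stores; everything else falls through to the plain memcpy intrinsic. The kind of type that takes the GC path, as ObjC++ (a hypothetical example):

    struct Wrapper {
      id object;   // under -fobjc-gc this makes hasObjectMember() true
      int plain;
    };
    // Copying a Wrapper goes through the collectable memmove, not memcpy.

This also lets callers, such as the synthesized copy-assignment in the CGExprCXX.cpp change below, call EmitAggregateCopy unconditionally.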
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
index f93c79c..69e5f0e 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
@@ -275,10 +275,7 @@ CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
QualType Ty = E->getType();
- if (ClassDecl->hasObjectMember())
- CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, This, Src, Ty);
- else
- EmitAggregateCopy(This, Src, Ty);
+ EmitAggregateCopy(This, Src, Ty);
return RValue::get(This);
}
}
@@ -484,6 +481,79 @@ static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
return V;
}
+static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
+ llvm::Value *NewPtr) {
+
+ assert(E->getNumConstructorArgs() == 1 &&
+ "Can only have one argument to initializer of POD type.");
+
+ const Expr *Init = E->getConstructorArg(0);
+ QualType AllocType = E->getAllocatedType();
+
+ if (!CGF.hasAggregateLLVMType(AllocType))
+ CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
+ AllocType.isVolatileQualified(), AllocType);
+ else if (AllocType->isAnyComplexType())
+ CGF.EmitComplexExprIntoAddr(Init, NewPtr,
+ AllocType.isVolatileQualified());
+ else
+ CGF.EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified());
+}
+
+void
+CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements) {
+ // We have a POD type.
+ if (E->getNumConstructorArgs() == 0)
+ return;
+
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+
+ // Create a temporary for the loop index and initialize it with 0.
+ llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
+ llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
+ Builder.CreateStore(Zero, IndexPtr);
+
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
+
+ EmitBlock(CondBlock);
+
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+  // Generate: if (loop-index < number-of-elements) fall to the loop body,
+ // otherwise, go to the block after the for-loop.
+ llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
+ llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
+ // If the condition is true, execute the body.
+ Builder.CreateCondBr(IsLess, ForBody, AfterFor);
+
+ EmitBlock(ForBody);
+
+ llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
+ // Inside the loop body, emit the constructor call on the array element.
+ Counter = Builder.CreateLoad(IndexPtr);
+ llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
+ "arrayidx");
+ StoreAnyExprIntoOneUnit(*this, E, Address);
+
+ EmitBlock(ContinueBlock);
+
+ // Emit the increment of the loop counter.
+ llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
+ Counter = Builder.CreateLoad(IndexPtr);
+ NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
+ Builder.CreateStore(NextVal, IndexPtr);
+
+ // Finally, branch back up to the condition for the next iteration.
+ EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ EmitBlock(AfterFor, true);
+}
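EmitNewArrayInitializer hand-builds the cond/body/inc blocks; as source, the emitted control flow is just the following (a C++ analogue of the IR, not the codegen itself):

    void initUnits(int *NewPtr, unsigned long NumElements, int Init) {
      for (unsigned long i = 0; i < NumElements; ++i)  // for.cond / for.body
        NewPtr[i] = Init;     // StoreAnyExprIntoOneUnit, once per element
    }                         // for.inc branches back; then for.end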
+
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
llvm::Value *NewPtr,
llvm::Value *NumElements) {
@@ -495,35 +565,32 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
E->constructor_arg_end());
return;
}
+ else {
+ CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
+ return;
+ }
}
-
- QualType AllocType = E->getAllocatedType();
if (CXXConstructorDecl *Ctor = E->getConstructor()) {
+ // Per C++ [expr.new]p15, if we have an initializer, then we're performing
+ // direct initialization. C++ [dcl.init]p5 requires that we
+ // zero-initialize storage if there are no user-declared constructors.
+ if (E->hasInitializer() &&
+ !Ctor->getParent()->hasUserDeclaredConstructor() &&
+ !Ctor->getParent()->isEmpty())
+ CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());
+
CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
NewPtr, E->constructor_arg_begin(),
E->constructor_arg_end());
return;
}
-
// We have a POD type.
if (E->getNumConstructorArgs() == 0)
return;
-
- assert(E->getNumConstructorArgs() == 1 &&
- "Can only have one argument to initializer of POD type.");
-
- const Expr *Init = E->getConstructorArg(0);
-
- if (!CGF.hasAggregateLLVMType(AllocType))
- CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
- AllocType.isVolatileQualified(), AllocType);
- else if (AllocType->isAnyComplexType())
- CGF.EmitComplexExprIntoAddr(Init, NewPtr,
- AllocType.isVolatileQualified());
- else
- CGF.EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified());
+
+ StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
}
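The new zero-initialization check implements C++ value-initialization: for "new T()" where T has no user-declared constructor, the storage must be zeroed before the trivial or implicitly-defined constructor runs. For example:

    struct P { int x; };       // no user-declared constructor
    struct C { int x; C(); };  // user-declared: no implicit zeroing
    P *p = new P();            // storage zeroed first: p->x == 0
    C *c = new C();            // C() alone decides what x holds

The isEmpty() test skips a pointless memset for empty classes.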
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
@@ -770,7 +837,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
if (const RecordType *RT = DeleteTy->getAs<RecordType>()) {
if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
if (!RD->hasTrivialDestructor()) {
- const CXXDestructorDecl *Dtor = RD->getDestructor(getContext());
+ const CXXDestructorDecl *Dtor = RD->getDestructor();
if (E->isArrayForm()) {
llvm::Value *AllocatedObjectPtr;
llvm::Value *NumElements;
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
index 0a0c914..0927319 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
@@ -131,14 +131,14 @@ public:
// FIXME: CompoundLiteralExpr
- ComplexPairTy EmitCast(Expr *Op, QualType DestTy);
+ ComplexPairTy EmitCast(CastExpr::CastKind CK, Expr *Op, QualType DestTy);
ComplexPairTy VisitImplicitCastExpr(ImplicitCastExpr *E) {
// Unlike for scalars, we don't have to worry about function->ptr demotion
// here.
- return EmitCast(E->getSubExpr(), E->getType());
+ return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType());
}
ComplexPairTy VisitCastExpr(CastExpr *E) {
- return EmitCast(E->getSubExpr(), E->getType());
+ return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType());
}
ComplexPairTy VisitCallExpr(const CallExpr *E);
ComplexPairTy VisitStmtExpr(const StmtExpr *E);
@@ -181,7 +181,7 @@ public:
ComplexPairTy VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
return CGF.EmitCXXExprWithTemporaries(E).getComplexVal();
}
- ComplexPairTy VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) {
+ ComplexPairTy VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
assert(E->getType()->isAnyComplexType() && "Expected complex type!");
QualType Elem = E->getType()->getAs<ComplexType>()->getElementType();
llvm::Constant *Null = llvm::Constant::getNullValue(CGF.ConvertType(Elem));
@@ -339,11 +339,22 @@ ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
return Val;
}
-ComplexPairTy ComplexExprEmitter::EmitCast(Expr *Op, QualType DestTy) {
+ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
+ QualType DestTy) {
// Two cases here: cast from (complex to complex) and (scalar to complex).
if (Op->getType()->isAnyComplexType())
return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
+ // FIXME: We should be looking at all of the cast kinds here, not
+ // cherry-picking the ones we have test cases for.
+ if (CK == CastExpr::CK_LValueBitCast) {
+ llvm::Value *V = CGF.EmitLValue(Op).getAddress();
+ V = Builder.CreateBitCast(V,
+ CGF.ConvertType(CGF.getContext().getPointerType(DestTy)));
+ // FIXME: Are the qualifiers correct here?
+ return EmitLoadOfComplex(V, DestTy.isVolatileQualified());
+ }
+
// C99 6.3.1.7: When a value of real type is converted to a complex type, the
// real part of the complex result value is determined by the rules of
// conversion to the corresponding real type and the imaginary part of the
@@ -521,22 +532,22 @@ EmitCompoundAssign(const CompoundAssignOperator *E,
// improve codegen a little. It is possible for the RHS to be complex or
// scalar.
OpInfo.Ty = E->getComputationResultType();
- OpInfo.RHS = EmitCast(E->getRHS(), OpInfo.Ty);
+ OpInfo.RHS = EmitCast(CastExpr::CK_Unknown, E->getRHS(), OpInfo.Ty);
- LValue LHSLV = CGF.EmitLValue(E->getLHS());
+ LValue LHS = CGF.EmitLValue(E->getLHS());
// We know the LHS is a complex lvalue.
ComplexPairTy LHSComplexPair;
- if (LHSLV.isPropertyRef())
- LHSComplexPair =
- CGF.EmitObjCPropertyGet(LHSLV.getPropertyRefExpr()).getComplexVal();
- else if (LHSLV.isKVCRef())
- LHSComplexPair =
- CGF.EmitObjCPropertyGet(LHSLV.getKVCRefExpr()).getComplexVal();
+ if (LHS.isPropertyRef())
+ LHSComplexPair =
+ CGF.EmitObjCPropertyGet(LHS.getPropertyRefExpr()).getComplexVal();
+ else if (LHS.isKVCRef())
+ LHSComplexPair =
+ CGF.EmitObjCPropertyGet(LHS.getKVCRefExpr()).getComplexVal();
else
- LHSComplexPair = EmitLoadOfComplex(LHSLV.getAddress(),
- LHSLV.isVolatileQualified());
+ LHSComplexPair = EmitLoadOfComplex(LHS.getAddress(),
+ LHS.isVolatileQualified());
- OpInfo.LHS=EmitComplexToComplexCast(LHSComplexPair, LHSTy, OpInfo.Ty);
+ OpInfo.LHS = EmitComplexToComplexCast(LHSComplexPair, LHSTy, OpInfo.Ty);
// Expand the binary operator.
ComplexPairTy Result = (this->*Func)(OpInfo);
@@ -545,23 +556,26 @@ EmitCompoundAssign(const CompoundAssignOperator *E,
Result = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
// Store the result value into the LHS lvalue.
- if (LHSLV.isPropertyRef())
- CGF.EmitObjCPropertySet(LHSLV.getPropertyRefExpr(),
+ if (LHS.isPropertyRef())
+ CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(),
RValue::getComplex(Result));
- else if (LHSLV.isKVCRef())
- CGF.EmitObjCPropertySet(LHSLV.getKVCRefExpr(), RValue::getComplex(Result));
+ else if (LHS.isKVCRef())
+ CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(), RValue::getComplex(Result));
else
- EmitStoreOfComplex(Result, LHSLV.getAddress(), LHSLV.isVolatileQualified());
- // And now return the LHS
+ EmitStoreOfComplex(Result, LHS.getAddress(), LHS.isVolatileQualified());
+
+ // Restore the Ignore* flags.
IgnoreReal = ignreal;
IgnoreImag = ignimag;
IgnoreRealAssign = ignreal;
IgnoreImagAssign = ignimag;
- if (LHSLV.isPropertyRef())
- return CGF.EmitObjCPropertyGet(LHSLV.getPropertyRefExpr()).getComplexVal();
- else if (LHSLV.isKVCRef())
- return CGF.EmitObjCPropertyGet(LHSLV.getKVCRefExpr()).getComplexVal();
- return EmitLoadOfComplex(LHSLV.getAddress(), LHSLV.isVolatileQualified());
+
+ // Objective-C property assignment never reloads the value following a store.
+ if (LHS.isPropertyRef() || LHS.isKVCRef())
+ return Result;
+
+ // Otherwise, reload the value.
+ return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified());
}
ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
@@ -569,8 +583,8 @@ ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
TestAndClearIgnoreImag();
bool ignreal = TestAndClearIgnoreRealAssign();
bool ignimag = TestAndClearIgnoreImagAssign();
- assert(CGF.getContext().getCanonicalType(E->getLHS()->getType()) ==
- CGF.getContext().getCanonicalType(E->getRHS()->getType()) &&
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
+ E->getRHS()->getType()) &&
"Invalid assignment");
// Emit the RHS.
ComplexPairTy Val = Visit(E->getRHS());
@@ -578,31 +592,26 @@ ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
// Compute the address to store into.
LValue LHS = CGF.EmitLValue(E->getLHS());
- // Store into it, if simple.
- if (LHS.isSimple()) {
- EmitStoreOfComplex(Val, LHS.getAddress(), LHS.isVolatileQualified());
-
- // And now return the LHS
- IgnoreReal = ignreal;
- IgnoreImag = ignimag;
- IgnoreRealAssign = ignreal;
- IgnoreImagAssign = ignimag;
- return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified());
- }
-
- // Otherwise we must have a property setter (no complex vector/bitfields).
+ // Store the result value into the LHS lvalue.
if (LHS.isPropertyRef())
CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(), RValue::getComplex(Val));
- else
+ else if (LHS.isKVCRef())
CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(), RValue::getComplex(Val));
+ else
+ EmitStoreOfComplex(Val, LHS.getAddress(), LHS.isVolatileQualified());
- // There is no reload after a store through a method, but we need to restore
- // the Ignore* flags.
+ // Restore the Ignore* flags.
IgnoreReal = ignreal;
IgnoreImag = ignimag;
IgnoreRealAssign = ignreal;
IgnoreImagAssign = ignimag;
- return Val;
+
+ // Objective-C property assignment never reloads the value following a store.
+ if (LHS.isPropertyRef() || LHS.isKVCRef())
+ return Val;
+
+ // Otherwise, reload the value.
+ return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified());
}
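Both complex assignment paths now agree on where the result of an assignment expression comes from: a store through an Objective-C property or KVC setter returns the computed value directly (no follow-up getter call), while an ordinary lvalue is reloaded after the store, which keeps volatile accesses honest. The reload case, using clang's _Complex extension (a sketch):

    volatile _Complex float c;
    _Complex float use() {
      return (c = 1.0f);  // value re-read from 'c' after the volatile store
    }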
ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) {
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
index 551a47a..bbd256c 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
@@ -52,8 +52,8 @@ private:
bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
llvm::Constant *InitExpr);
- bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
- llvm::Constant *InitExpr);
+ void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::ConstantInt *InitExpr);
void AppendPadding(uint64_t NumBytes);
@@ -123,14 +123,9 @@ AppendField(const FieldDecl *Field, uint64_t FieldOffset,
return true;
}
-bool ConstStructBuilder::
- AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
- llvm::Constant *InitCst) {
- llvm::ConstantInt *CI = cast_or_null<llvm::ConstantInt>(InitCst);
- // FIXME: Can this ever happen?
- if (!CI)
- return false;
-
+void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
+ uint64_t FieldOffset,
+ llvm::ConstantInt *CI) {
if (FieldOffset > NextFieldOffsetInBytes * 8) {
// We need to add padding.
uint64_t NumBytes =
@@ -195,16 +190,43 @@ bool ConstStructBuilder::
Tmp = Tmp.shl(8 - BitsInPreviousByte);
}
- // Or in the bits that go into the previous byte.
- if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(Elements.back()))
+ // 'or' in the bits that go into the previous byte.
+ llvm::Value *LastElt = Elements.back();
+ if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(LastElt))
Tmp |= Val->getValue();
- else
- assert(isa<llvm::UndefValue>(Elements.back()));
+ else {
+ assert(isa<llvm::UndefValue>(LastElt));
+ // If there is an undef field that we're adding to, it can either be a
+ // scalar undef (in which case, we just replace it with our field) or it
+ // is an array. If it is an array, we have to pull one byte off the
+ // array so that the other undef bytes stay around.
+ if (!isa<llvm::IntegerType>(LastElt->getType())) {
+        // The undef padding will be a multibyte array; create a new smaller
+        // padding and then a hole for our i8 to get plopped into.
+ assert(isa<llvm::ArrayType>(LastElt->getType()) &&
+ "Expected array padding of undefs");
+ const llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
+ assert(AT->getElementType()->isIntegerTy(8) &&
+ AT->getNumElements() != 0 &&
+ "Expected non-empty array padding of undefs");
+
+ // Remove the padding array.
+ NextFieldOffsetInBytes -= AT->getNumElements();
+ Elements.pop_back();
+
+ // Add the padding back in two chunks.
+ AppendPadding(AT->getNumElements()-1);
+ AppendPadding(1);
+ assert(isa<llvm::UndefValue>(Elements.back()) &&
+ Elements.back()->getType()->isIntegerTy(8) &&
+ "Padding addition didn't work right");
+ }
+ }
Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);
if (FitsCompletelyInPreviousByte)
- return true;
+ return;
}
while (FieldValue.getBitWidth() > 8) {
@@ -248,7 +270,6 @@ bool ConstStructBuilder::
Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
FieldValue));
NextFieldOffsetInBytes++;
- return true;
}
void ConstStructBuilder::AppendPadding(uint64_t NumBytes) {
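The merge step in AppendBitField above or's a field's leading bits into the partially filled previous byte before whole bytes are appended. The APInt arithmetic in isolation (hypothetical field values; the real code also handles big-endian layout and undef padding):

    #include "llvm/ADT/APInt.h"
    // Two bitfields sharing a byte, e.g. "unsigned a : 3, b : 5" with a=5, b=9:
    llvm::APInt Prev(8, 5);       // 0b00000101, bits already emitted for 'a'
    llvm::APInt Tmp(8, 9u << 3);  // 'b' shifted past a's three bits
    Tmp |= Prev;                  // 0b01001101: both fields packed in one byte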
@@ -346,8 +367,8 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
return false;
} else {
// Otherwise we have a bitfield.
- if (!AppendBitField(*Field, Layout.getFieldOffset(FieldNo), EltInit))
- return false;
+ AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
+ cast<llvm::ConstantInt>(EltInit));
}
}
@@ -443,30 +464,8 @@ public:
CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
llvm::Constant *Values[2];
-
- // Get the function pointer (or index if this is a virtual function).
- if (MD->isVirtual()) {
- uint64_t Index = CGM.getVTables().getMethodVTableIndex(MD);
- // FIXME: We shouldn't use / 8 here.
- uint64_t PointerWidthInBytes =
- CGM.getContext().Target.getPointerWidth(0) / 8;
-
- // Itanium C++ ABI 2.3:
- // For a non-virtual function, this field is a simple function pointer.
- // For a virtual function, it is 1 plus the virtual table offset
- // (in bytes) of the function, represented as a ptrdiff_t.
- Values[0] = llvm::ConstantInt::get(PtrDiffTy,
- (Index * PointerWidthInBytes) + 1);
- } else {
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- const llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
- FPT->isVariadic());
-
- llvm::Constant *FuncPtr = CGM.GetAddrOfFunction(MD, Ty);
- Values[0] = llvm::ConstantExpr::getPtrToInt(FuncPtr, PtrDiffTy);
- }
+ Values[0] = CGM.GetCXXMemberFunctionPointerValue(MD);
// The adjustment will always be 0.
Values[1] = llvm::ConstantInt::get(PtrDiffTy, 0);
@@ -930,7 +929,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
llvm::Constant *C = llvm::ConstantInt::get(VMContext,
Result.Val.getInt());
- if (C->getType() == llvm::Type::getInt1Ty(VMContext)) {
+ if (C->getType()->isIntegerTy(1)) {
const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
C = llvm::ConstantExpr::getZExt(C, BoolTy);
}
@@ -977,7 +976,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
}
llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
- if (C && C->getType() == llvm::Type::getInt1Ty(VMContext)) {
+ if (C && C->getType()->isIntegerTy(1)) {
const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
C = llvm::ConstantExpr::getZExt(C, BoolTy);
}
@@ -1009,7 +1008,11 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
// Go through all bases and fill in any null pointer to data members.
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
E = RD->bases_end(); I != E; ++I) {
- assert(!I->isVirtual() && "Should not see virtual bases here!");
+ if (I->isVirtual()) {
+ // FIXME: We should initialize null pointer to data members in virtual
+ // bases here.
+ continue;
+ }
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
@@ -1088,7 +1091,11 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
// Go through all bases and fill in any null pointer to data members.
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
E = RD->bases_end(); I != E; ++I) {
- assert(!I->isVirtual() && "Should not see virtual bases here!");
+ if (I->isVirtual()) {
+ // FIXME: We should initialize null pointer to data members in virtual
+ // bases here.
+ continue;
+ }
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
@@ -1131,6 +1138,11 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
for (RecordDecl::field_iterator I = RD->field_begin(),
E = RD->field_end(); I != E; ++I) {
const FieldDecl *FD = *I;
+
+ // Ignore bit fields.
+ if (FD->isBitField())
+ continue;
+
unsigned FieldNo = Layout.getLLVMFieldNo(FD);
Elements[FieldNo] = EmitNullConstant(FD->getType());
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
index 2108414..ef38209 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
@@ -40,7 +40,8 @@ struct BinOpInfo {
Value *LHS;
Value *RHS;
QualType Ty; // Computation Type.
- const BinaryOperator *E;
+ BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
+  const Expr *E;      // Entire expr, used for ErrorUnsupported; may not be a binop.
};
namespace {
@@ -125,7 +126,7 @@ public:
Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
}
- Value *VisitCXXZeroInitValueExpr(const CXXZeroInitValueExpr *E) {
+ Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
return EmitNullValue(E->getType());
}
Value *VisitGNUNullExpr(const GNUNullExpr *E) {
@@ -212,22 +213,27 @@ public:
Value *VisitBlockDeclRefExpr(const BlockDeclRefExpr *E);
// Unary Operators.
- Value *VisitPrePostIncDec(const UnaryOperator *E, bool isInc, bool isPre) {
- LValue LV = EmitLValue(E->getSubExpr());
- return CGF.EmitScalarPrePostIncDec(E, LV, isInc, isPre);
- }
Value *VisitUnaryPostDec(const UnaryOperator *E) {
- return VisitPrePostIncDec(E, false, false);
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, false, false);
}
Value *VisitUnaryPostInc(const UnaryOperator *E) {
- return VisitPrePostIncDec(E, true, false);
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, true, false);
}
Value *VisitUnaryPreDec(const UnaryOperator *E) {
- return VisitPrePostIncDec(E, false, true);
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, false, true);
}
Value *VisitUnaryPreInc(const UnaryOperator *E) {
- return VisitPrePostIncDec(E, true, true);
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, true, true);
}
+
+ llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre);
+
+
Value *VisitUnaryAddrOf(const UnaryOperator *E) {
return EmitLValue(E->getSubExpr()).getAddress();
}
@@ -291,9 +297,17 @@ public:
// Binary Operators.
Value *EmitMul(const BinOpInfo &Ops) {
- if (CGF.getContext().getLangOptions().OverflowChecking
- && Ops.Ty->isSignedIntegerType())
- return EmitOverflowCheckedBinOp(Ops);
+ if (Ops.Ty->isSignedIntegerType()) {
+ switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
+ case LangOptions::SOB_Defined:
+ return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(Ops);
+ }
+ }
+
if (Ops.LHS->getType()->isFPOrFPVectorTy())
return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
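EmitMul now keys off getSignedOverflowBehavior() rather than a single overflow-checking flag, so -fwrapv and -ftrapv get distinct lowerings. For a plain signed multiply:

    int mul(int a, int b) { return a * b; }
    // default (SOB_Undefined): "mul nsw"  - wrap is UB, optimizer may exploit it
    // -fwrapv (SOB_Defined):   plain "mul" - two's-complement wraparound
    // -ftrapv (SOB_Trapping):  overflow-checked multiply that traps on wrap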
@@ -320,7 +334,7 @@ public:
BinOpInfo EmitBinOps(const BinaryOperator *E);
LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
- Value *&BitFieldResult);
+ Value *&Result);
Value *EmitCompoundAssign(const CompoundAssignOperator *E,
Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
@@ -435,8 +449,6 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
if (DstType->isVoidType()) return 0;
- llvm::LLVMContext &VMContext = CGF.getLLVMContext();
-
// Handle conversions to bool first, they are special: comparisons against 0.
if (DstType->isBooleanType())
return EmitConversionToBool(Src, SrcType);
@@ -458,8 +470,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
// First, convert to the correct width so that we control the kind of
// extension.
- const llvm::Type *MiddleTy =
- llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
+ const llvm::Type *MiddleTy = CGF.IntPtrTy;
bool InputSigned = SrcType->isSignedIntegerType();
llvm::Value* IntResult =
Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
@@ -481,16 +492,14 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
// Insert the element in element zero of an undef vector
llvm::Value *UnV = llvm::UndefValue::get(DstTy);
- llvm::Value *Idx =
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+ llvm::Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, 0);
UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
// Splat the element across to all elements
llvm::SmallVector<llvm::Constant*, 16> Args;
unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
for (unsigned i = 0; i < NumElements; i++)
- Args.push_back(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), 0));
+ Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 0));
llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
@@ -578,12 +587,104 @@ Value *ScalarExprEmitter::VisitExpr(Expr *E) {
}
Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
- llvm::SmallVector<llvm::Constant*, 32> indices;
- for (unsigned i = 2; i < E->getNumSubExprs(); i++) {
- indices.push_back(cast<llvm::Constant>(CGF.EmitScalarExpr(E->getExpr(i))));
+ // Vector Mask Case
+ if (E->getNumSubExprs() == 2 ||
+ (E->getNumSubExprs() == 3 && E->getExpr(2)->getType()->isVectorType())) {
+ Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
+ Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
+ Value *Mask;
+
+ const llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
+ unsigned LHSElts = LTy->getNumElements();
+
+ if (E->getNumSubExprs() == 3) {
+ Mask = CGF.EmitScalarExpr(E->getExpr(2));
+
+ // Shuffle LHS & RHS into one input vector.
+ llvm::SmallVector<llvm::Constant*, 32> concat;
+ for (unsigned i = 0; i != LHSElts; ++i) {
+ concat.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 2*i));
+ concat.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 2*i+1));
+ }
+
+ Value* CV = llvm::ConstantVector::get(concat.begin(), concat.size());
+ LHS = Builder.CreateShuffleVector(LHS, RHS, CV, "concat");
+ LHSElts *= 2;
+ } else {
+ Mask = RHS;
+ }
+
+ const llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
+ llvm::Constant* EltMask;
+
+ // Treat vec3 like vec4.
+ if ((LHSElts == 6) && (E->getNumSubExprs() == 3))
+ EltMask = llvm::ConstantInt::get(MTy->getElementType(),
+ (1 << llvm::Log2_32(LHSElts+2))-1);
+ else if ((LHSElts == 3) && (E->getNumSubExprs() == 2))
+ EltMask = llvm::ConstantInt::get(MTy->getElementType(),
+ (1 << llvm::Log2_32(LHSElts+1))-1);
+ else
+ EltMask = llvm::ConstantInt::get(MTy->getElementType(),
+ (1 << llvm::Log2_32(LHSElts))-1);
+
+ // Mask off the high bits of each shuffle index.
+ llvm::SmallVector<llvm::Constant *, 32> MaskV;
+ for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i)
+ MaskV.push_back(EltMask);
+
+ Value* MaskBits = llvm::ConstantVector::get(MaskV.begin(), MaskV.size());
+ Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
+
+ // newv = undef
+ // mask = mask & maskbits
+ // for each elt
+ // n = extract mask i
+ // x = extract val n
+ // newv = insert newv, x, i
+ const llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
+ MTy->getNumElements());
+ Value* NewV = llvm::UndefValue::get(RTy);
+ for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
+ Value *Indx = llvm::ConstantInt::get(CGF.Int32Ty, i);
+ Indx = Builder.CreateExtractElement(Mask, Indx, "shuf_idx");
+ Indx = Builder.CreateZExt(Indx, CGF.Int32Ty, "idx_zext");
+
+      // Handle vec3 specially, since the index will be off by one for the RHS.
+ if ((LHSElts == 6) && (E->getNumSubExprs() == 3)) {
+ Value *cmpIndx, *newIndx;
+ cmpIndx = Builder.CreateICmpUGT(Indx,
+ llvm::ConstantInt::get(CGF.Int32Ty, 3),
+ "cmp_shuf_idx");
+ newIndx = Builder.CreateSub(Indx, llvm::ConstantInt::get(CGF.Int32Ty,1),
+ "shuf_idx_adj");
+ Indx = Builder.CreateSelect(cmpIndx, newIndx, Indx, "sel_shuf_idx");
+ }
+ Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
+ NewV = Builder.CreateInsertElement(NewV, VExt, Indx, "shuf_ins");
+ }
+ return NewV;
}
+
Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
+
+  // Handle vec3 specially, since the index will be off by one for the RHS.
+ llvm::SmallVector<llvm::Constant*, 32> indices;
+ for (unsigned i = 2; i < E->getNumSubExprs(); i++) {
+ llvm::Constant *C = cast<llvm::Constant>(CGF.EmitScalarExpr(E->getExpr(i)));
+ const llvm::VectorType *VTy = cast<llvm::VectorType>(V1->getType());
+ if (VTy->getNumElements() == 3) {
+ if (llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C)) {
+ uint64_t cVal = CI->getZExtValue();
+ if (cVal > 3) {
+ C = llvm::ConstantInt::get(C->getType(), cVal-1);
+ }
+ }
+ }
+ indices.push_back(C);
+ }
+
Value* SV = llvm::ConstantVector::get(indices.begin(), indices.size());
return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
}
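The variable-mask path above can be read as a scalar gather. A standalone model (assuming a power-of-two element count, which the Log2_32 mask trick relies on; vec3 gets the special-cased adjustment shown above):

#include <array>
#include <cstdio>

// Scalar model of the variable-mask case: mask each index into range, then
// gather one source element per result lane (extract/insert in the IR).
template <unsigned N>
std::array<int, N> shuffle(const std::array<int, N> &Val,
                           const std::array<unsigned, N> &Mask) {
  std::array<int, N> Out{};
  for (unsigned i = 0; i != N; ++i)
    Out[i] = Val[Mask[i] & (N - 1)]; // EltMask = (1 << Log2_32(N)) - 1
  return Out;
}

int main() {
  std::array<int, 4> V = {10, 20, 30, 40};
  std::array<unsigned, 4> M = {3, 0, 6, 1}; // 6 & 3 == 2 after masking
  std::array<int, 4> R = shuffle(V, M);
  std::printf("%d %d %d %d\n", R[0], R[1], R[2], R[3]); // 40 10 30 20
}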
@@ -614,10 +715,7 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
Value *Base = Visit(E->getBase());
Value *Idx = Visit(E->getIdx());
bool IdxSigned = E->getIdx()->getType()->isSignedIntegerType();
- Idx = Builder.CreateIntCast(Idx,
- llvm::Type::getInt32Ty(CGF.getLLVMContext()),
- IdxSigned,
- "vecidxcast");
+ Idx = Builder.CreateIntCast(Idx, CGF.Int32Ty, IdxSigned, "vecidxcast");
return Builder.CreateExtractElement(Base, Idx, "vecext");
}
@@ -646,7 +744,6 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
return Visit(E->getInit(0));
unsigned ResElts = VType->getNumElements();
- const llvm::Type *I32Ty = llvm::Type::getInt32Ty(CGF.getLLVMContext());
// Loop over initializers collecting the Value for each, and remembering
// whether the source was swizzle (ExtVectorElementExpr). This will allow
@@ -677,7 +774,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// insert into undef -> shuffle (src, undef)
Args.push_back(C);
for (unsigned j = 1; j != ResElts; ++j)
- Args.push_back(llvm::UndefValue::get(I32Ty));
+ Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
LHS = EI->getVectorOperand();
RHS = V;
@@ -686,11 +783,11 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// insert into undefshuffle && size match -> shuffle (v, src)
llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
for (unsigned j = 0; j != CurIdx; ++j)
- Args.push_back(getMaskElt(SVV, j, 0, I32Ty));
- Args.push_back(llvm::ConstantInt::get(I32Ty,
+ Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
+ Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
ResElts + C->getZExtValue()));
for (unsigned j = CurIdx + 1; j != ResElts; ++j)
- Args.push_back(llvm::UndefValue::get(I32Ty));
+ Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
RHS = EI->getVectorOperand();
@@ -704,7 +801,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
}
}
}
- Value *Idx = llvm::ConstantInt::get(I32Ty, CurIdx);
+ Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, CurIdx);
V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
VIsUndefShuffle = false;
++CurIdx;
@@ -728,15 +825,15 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// this shuffle directly into it.
if (VIsUndefShuffle) {
Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
- I32Ty));
+ CGF.Int32Ty));
} else {
- Args.push_back(llvm::ConstantInt::get(I32Ty, j));
+ Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
}
}
for (unsigned j = 0, je = InitElts; j != je; ++j)
- Args.push_back(getMaskElt(SVI, j, Offset, I32Ty));
+ Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
- Args.push_back(llvm::UndefValue::get(I32Ty));
+ Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
if (VIsUndefShuffle)
V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
@@ -749,20 +846,20 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// to the vector initializer into V.
if (Args.empty()) {
for (unsigned j = 0; j != InitElts; ++j)
- Args.push_back(llvm::ConstantInt::get(I32Ty, j));
+ Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
for (unsigned j = InitElts; j != ResElts; ++j)
- Args.push_back(llvm::UndefValue::get(I32Ty));
+ Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
Mask, "vext");
Args.clear();
for (unsigned j = 0; j != CurIdx; ++j)
- Args.push_back(llvm::ConstantInt::get(I32Ty, j));
+ Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
for (unsigned j = 0; j != InitElts; ++j)
- Args.push_back(llvm::ConstantInt::get(I32Ty, j+Offset));
+ Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j+Offset));
for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
- Args.push_back(llvm::UndefValue::get(I32Ty));
+ Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
}
// If V is undef, make sure it ends up on the RHS of the shuffle to aid
@@ -781,7 +878,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// Emit remaining default initializers
for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
- Value *Idx = llvm::ConstantInt::get(I32Ty, CurIdx);
+ Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, CurIdx);
llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
}
@@ -828,6 +925,15 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
//assert(0 && "Unknown cast kind!");
break;
+ case CastExpr::CK_LValueBitCast: {
+ Value *V = EmitLValue(E).getAddress();
+ V = Builder.CreateBitCast(V,
+ ConvertType(CGF.getContext().getPointerType(DestTy)));
+ // FIXME: Are the qualifiers correct here?
+ return EmitLoadOfLValue(LValue::MakeAddr(V, CGF.MakeQualifiers(DestTy)),
+ DestTy);
+ }
+
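CK_LValueBitCast reinterprets the object's storage rather than converting its value: take the address, retype the pointer, load. A standalone sketch of those semantics (memcpy spells the type pun in a well-defined way; assumes 32-bit float and unsigned):

#include <cstdio>
#include <cstring>

int main() {
  float F = 1.0f;
  unsigned Bits;
  // The cast takes the address, retypes the pointer, and loads -- spelled
  // here with memcpy to keep the sketch well-defined.
  std::memcpy(&Bits, &F, sizeof Bits);
  std::printf("0x%08x\n", Bits); // 0x3f800000
}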
case CastExpr::CK_AnyPointerToObjCPointerCast:
case CastExpr::CK_AnyPointerToBlockPointerCast:
case CastExpr::CK_BitCast: {
@@ -905,13 +1011,13 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
std::swap(DerivedDecl, BaseDecl);
if (llvm::Constant *Adj =
- CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl,
- CE->getBasePath())) {
+ CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl, CE->getBasePath())){
if (CE->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer)
- Src = Builder.CreateSub(Src, Adj, "adj");
+ Src = Builder.CreateNSWSub(Src, Adj, "adj");
else
- Src = Builder.CreateAdd(Src, Adj, "adj");
+ Src = Builder.CreateNSWAdd(Src, Adj, "adj");
}
+
return Src;
}
@@ -924,8 +1030,7 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
// First, convert to the correct width so that we control the kind of
// extension.
- const llvm::Type *MiddleTy =
- llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
+ const llvm::Type *MiddleTy = CGF.IntPtrTy;
bool InputSigned = E->getType()->isSignedIntegerType();
llvm::Value* IntResult =
Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
@@ -946,16 +1051,14 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
// Insert the element in element zero of an undef vector
llvm::Value *UnV = llvm::UndefValue::get(DstTy);
- llvm::Value *Idx =
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+ llvm::Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, 0);
UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
// Splat the element across to all elements
llvm::SmallVector<llvm::Constant*, 16> Args;
unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
for (unsigned i = 0; i < NumElements; i++)
- Args.push_back(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), 0));
+ Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 0));
llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
@@ -1020,12 +1123,126 @@ Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
// Unary Operators
//===----------------------------------------------------------------------===//
+llvm::Value *ScalarExprEmitter::
+EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+
+ QualType ValTy = E->getSubExpr()->getType();
+ llvm::Value *InVal = EmitLoadOfLValue(LV, ValTy);
+
+ int AmountVal = isInc ? 1 : -1;
+
+ if (ValTy->isPointerType() &&
+ ValTy->getAs<PointerType>()->isVariableArrayType()) {
+ // The amount of the addition/subtraction needs to account for the VLA size
+ CGF.ErrorUnsupported(E, "VLA pointer inc/dec");
+ }
+
+ llvm::Value *NextVal;
+ if (const llvm::PointerType *PT =
+ dyn_cast<llvm::PointerType>(InVal->getType())) {
+ llvm::Constant *Inc = llvm::ConstantInt::get(CGF.Int32Ty, AmountVal);
+ if (!isa<llvm::FunctionType>(PT->getElementType())) {
+ QualType PTEE = ValTy->getPointeeType();
+ if (const ObjCObjectType *OIT = PTEE->getAs<ObjCObjectType>()) {
+ // Handle interface types, which are not represented with a concrete
+ // type.
+ int size = CGF.getContext().getTypeSize(OIT) / 8;
+ if (!isInc)
+ size = -size;
+ Inc = llvm::ConstantInt::get(Inc->getType(), size);
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+ InVal = Builder.CreateBitCast(InVal, i8Ty);
+ NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr");
+ llvm::Value *lhs = LV.getAddress();
+ lhs = Builder.CreateBitCast(lhs, llvm::PointerType::getUnqual(i8Ty));
+ LV = LValue::MakeAddr(lhs, CGF.MakeQualifiers(ValTy));
+ } else
+ NextVal = Builder.CreateInBoundsGEP(InVal, Inc, "ptrincdec");
+ } else {
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+ NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
+ NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
+ NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
+ }
+ } else if (InVal->getType()->isIntegerTy(1) && isInc) {
+ // Bool++ is an interesting case, due to promotion rules, we get:
+ // Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
+ // Bool = ((int)Bool+1) != 0
+ // An interesting aspect of this is that increment is always true.
+ // Decrement does not have this property.
+ NextVal = llvm::ConstantInt::getTrue(VMContext);
+ } else if (isa<llvm::IntegerType>(InVal->getType())) {
+ NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);
+
+ if (!ValTy->isSignedIntegerType())
+ // Unsigned integer inc is always two's complement.
+ NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ else {
+ switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ NextVal = Builder.CreateNSWAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ break;
+ case LangOptions::SOB_Defined:
+ NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ break;
+ case LangOptions::SOB_Trapping:
+ BinOpInfo BinOp;
+ BinOp.LHS = InVal;
+ BinOp.RHS = NextVal;
+ BinOp.Ty = E->getType();
+ BinOp.Opcode = BinaryOperator::Add;
+ BinOp.E = E;
+ return EmitOverflowCheckedBinOp(BinOp);
+ }
+ }
+ } else {
+ // Add the inc/dec to the real part.
+ if (InVal->getType()->isFloatTy())
+ NextVal =
+ llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<float>(AmountVal)));
+ else if (InVal->getType()->isDoubleTy())
+ NextVal =
+ llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<double>(AmountVal)));
+ else {
+ llvm::APFloat F(static_cast<float>(AmountVal));
+ bool ignored;
+ F.convert(CGF.Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
+ &ignored);
+ NextVal = llvm::ConstantFP::get(VMContext, F);
+ }
+ NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ }
+
+ // Store the updated result through the lvalue.
+ if (LV.isBitField())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy, &NextVal);
+ else
+ CGF.EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy);
+
+ // If this is a postinc, return the value read from memory, otherwise use the
+ // updated value.
+ return isPre ? NextVal : InVal;
+}
+
+
+
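A standalone illustration of the Bool++ shortcut described above; decBool shows why decrement cannot take it:

#include <cstdio>

// ++ on a 1-bit value can only produce true, so the emitter stores the
// constant directly; decrement gets no such shortcut: after promotion and
// truncation back to bool, true-- yields false and false-- yields true.
bool incBool(bool)   { return true; }
bool decBool(bool B) { return ((int)B - 1) != 0; }

int main() {
  std::printf("%d %d %d %d\n",
              incBool(false), incBool(true),   // 1 1
              decBool(true),  decBool(false)); // 0 1
}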
Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
TestAndClearIgnoreResultAssign();
- Value *Op = Visit(E->getSubExpr());
- if (Op->getType()->isFPOrFPVectorTy())
- return Builder.CreateFNeg(Op, "neg");
- return Builder.CreateNeg(Op, "neg");
+ // Emit unary minus with EmitSub so we handle overflow cases etc.
+ BinOpInfo BinOp;
+ BinOp.RHS = Visit(E->getSubExpr());
+
+ if (BinOp.RHS->getType()->isFPOrFPVectorTy())
+ BinOp.LHS = llvm::ConstantFP::getZeroValueForNegation(BinOp.RHS->getType());
+ else
+ BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
+ BinOp.Ty = E->getType();
+ BinOp.Opcode = BinaryOperator::Sub;
+ BinOp.E = E;
+ return EmitSub(BinOp);
}
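Routing -x through EmitSub keeps a single overflow-handling path, and it is also why the floating-point left operand must be the negative zero produced by getZeroValueForNegation: a plain +0.0 LHS would lose the sign of zero. A standalone demonstration:

#include <cstdio>

int main() {
  double Z = 0.0;
  // 0.0 - 0.0 is +0.0, so subtracting from a positive zero would get the
  // sign wrong; -0.0 - 0.0 is -0.0, matching what -Z must produce.
  std::printf("%g %g %g\n", 0.0 - Z, -0.0 - Z, -Z); // 0 -0 -0
}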
Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
@@ -1126,6 +1343,7 @@ BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
Result.LHS = Visit(E->getLHS());
Result.RHS = Visit(E->getRHS());
Result.Ty = E->getType();
+ Result.Opcode = E->getOpcode();
Result.E = E;
return Result;
}
@@ -1133,9 +1351,8 @@ BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
LValue ScalarExprEmitter::EmitCompoundAssignLValue(
const CompoundAssignOperator *E,
Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
- Value *&BitFieldResult) {
+ Value *&Result) {
QualType LHSTy = E->getLHS()->getType();
- BitFieldResult = 0;
BinOpInfo OpInfo;
if (E->getComputationResultType()->isAnyComplexType()) {
@@ -1144,7 +1361,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
// actually need the imaginary part of the RHS for multiplication and
// division.)
CGF.ErrorUnsupported(E, "complex compound assignment");
- llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+ Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
return LValue();
}
@@ -1152,6 +1369,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
// first, plus this should improve codegen a little.
OpInfo.RHS = Visit(E->getRHS());
OpInfo.Ty = E->getComputationResultType();
+ OpInfo.Opcode = E->getOpcode();
OpInfo.E = E;
// Load/convert the LHS.
LValue LHSLV = EmitCheckedLValue(E->getLHS());
@@ -1160,7 +1378,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
E->getComputationLHSType());
// Expand the binary operator.
- Value *Result = (this->*Func)(OpInfo);
+ Result = (this->*Func)(OpInfo);
// Convert the result back to the LHS type.
Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);
@@ -1169,30 +1387,35 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
// specially because the result is altered by the store, i.e., [C99 6.5.16p1]
// 'An assignment expression has the value of the left operand after the
// assignment...'.
- if (LHSLV.isBitField()) {
- if (!LHSLV.isVolatileQualified()) {
- CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy,
- &Result);
- BitFieldResult = Result;
- return LHSLV;
- } else
- CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy);
- } else
+ if (LHSLV.isBitField())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy,
+ &Result);
+ else
CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV, LHSTy);
+
return LHSLV;
}
Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
bool Ignore = TestAndClearIgnoreResultAssign();
- Value *BitFieldResult;
- LValue LHSLV = EmitCompoundAssignLValue(E, Func, BitFieldResult);
- if (BitFieldResult)
- return BitFieldResult;
-
+ Value *RHS;
+ LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
+
+ // If the result is clearly ignored, return now.
if (Ignore)
return 0;
- return EmitLoadOfLValue(LHSLV, E->getType());
+
+ // Objective-C property assignment never reloads the value following a store.
+ if (LHS.isPropertyRef() || LHS.isKVCRef())
+ return RHS;
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LHS.isVolatileQualified())
+ return RHS;
+
+ // Otherwise, reload the value.
+ return EmitLoadOfLValue(LHS, E->getType());
}
@@ -1217,7 +1440,7 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
unsigned IID;
unsigned OpID = 0;
- switch (Ops.E->getOpcode()) {
+ switch (Ops.Opcode) {
case BinaryOperator::Add:
case BinaryOperator::AddAssign:
OpID = 1;
@@ -1265,20 +1488,20 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
// long long (*__overflow_handler)(long long a, long long b, char op,
// char width)
  std::vector<const llvm::Type*> handlerArgTypes;
-  handlerArgTypes.push_back(llvm::Type::getInt64Ty(VMContext));
-  handlerArgTypes.push_back(llvm::Type::getInt64Ty(VMContext));
+  handlerArgTypes.push_back(CGF.Int64Ty);
+  handlerArgTypes.push_back(CGF.Int64Ty);
  handlerArgTypes.push_back(llvm::Type::getInt8Ty(VMContext));
  handlerArgTypes.push_back(llvm::Type::getInt8Ty(VMContext));
-  llvm::FunctionType *handlerTy = llvm::FunctionType::get(
-      llvm::Type::getInt64Ty(VMContext), handlerArgTypes, false);
+  llvm::FunctionType *handlerTy =
+    llvm::FunctionType::get(CGF.Int64Ty, handlerArgTypes, false);
llvm::Value *handlerFunction =
CGF.CGM.getModule().getOrInsertGlobal("__overflow_handler",
llvm::PointerType::getUnqual(handlerTy));
handlerFunction = Builder.CreateLoad(handlerFunction);
llvm::Value *handlerResult = Builder.CreateCall4(handlerFunction,
- Builder.CreateSExt(Ops.LHS, llvm::Type::getInt64Ty(VMContext)),
- Builder.CreateSExt(Ops.RHS, llvm::Type::getInt64Ty(VMContext)),
+ Builder.CreateSExt(Ops.LHS, CGF.Int64Ty),
+ Builder.CreateSExt(Ops.RHS, CGF.Int64Ty),
llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), OpID),
llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext),
cast<llvm::IntegerType>(opTy)->getBitWidth()));
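The generated code loads a function pointer from the global named __overflow_handler and calls it with both operands sign-extended to 64 bits, an opcode id, and the operand bit width. A hypothetical runtime-side definition matching that contract (the handler body and the name handleOverflow are invented; only the signature comes from the comment above):

#include <cstdio>
#include <cstdlib>

extern "C" {
// The trapping path sign-extends both operands to 64 bits before the call.
long long handleOverflow(long long A, long long B, char Op, char Width) {
  std::fprintf(stderr, "signed overflow: a=%lld b=%lld op=%d width=%d\n",
               A, B, Op, Width);
  std::abort(); // a saturating handler could return a clamped result instead
}

// The emitter loads through this global pointer, so the runtime installs
// its handler by initializing it.
long long (*__overflow_handler)(long long, long long, char, char) =
    handleOverflow;
}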
@@ -1300,49 +1523,56 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
if (!Ops.Ty->isAnyPointerType()) {
- if (CGF.getContext().getLangOptions().OverflowChecking &&
- Ops.Ty->isSignedIntegerType())
- return EmitOverflowCheckedBinOp(Ops);
-
+ if (Ops.Ty->isSignedIntegerType()) {
+ switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWAdd(Ops.LHS, Ops.RHS, "add");
+ case LangOptions::SOB_Defined:
+ return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(Ops);
+ }
+ }
+
if (Ops.LHS->getType()->isFPOrFPVectorTy())
return Builder.CreateFAdd(Ops.LHS, Ops.RHS, "add");
- // Signed integer overflow is undefined behavior.
- if (Ops.Ty->isSignedIntegerType())
- return Builder.CreateNSWAdd(Ops.LHS, Ops.RHS, "add");
-
return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
}
+ // Must have binary (not unary) expr here. Unary pointer decrement doesn't
+ // use this path.
+ const BinaryOperator *BinOp = cast<BinaryOperator>(Ops.E);
+
if (Ops.Ty->isPointerType() &&
Ops.Ty->getAs<PointerType>()->isVariableArrayType()) {
// The amount of the addition needs to account for the VLA size
- CGF.ErrorUnsupported(Ops.E, "VLA pointer addition");
+ CGF.ErrorUnsupported(BinOp, "VLA pointer addition");
}
+
Value *Ptr, *Idx;
Expr *IdxExp;
- const PointerType *PT = Ops.E->getLHS()->getType()->getAs<PointerType>();
+ const PointerType *PT = BinOp->getLHS()->getType()->getAs<PointerType>();
const ObjCObjectPointerType *OPT =
- Ops.E->getLHS()->getType()->getAs<ObjCObjectPointerType>();
+ BinOp->getLHS()->getType()->getAs<ObjCObjectPointerType>();
if (PT || OPT) {
Ptr = Ops.LHS;
Idx = Ops.RHS;
- IdxExp = Ops.E->getRHS();
+ IdxExp = BinOp->getRHS();
} else { // int + pointer
- PT = Ops.E->getRHS()->getType()->getAs<PointerType>();
- OPT = Ops.E->getRHS()->getType()->getAs<ObjCObjectPointerType>();
+ PT = BinOp->getRHS()->getType()->getAs<PointerType>();
+ OPT = BinOp->getRHS()->getType()->getAs<ObjCObjectPointerType>();
assert((PT || OPT) && "Invalid add expr");
Ptr = Ops.RHS;
Idx = Ops.LHS;
- IdxExp = Ops.E->getLHS();
+ IdxExp = BinOp->getLHS();
}
unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
if (Width < CGF.LLVMPointerWidth) {
// Zero or sign extend the pointer value based on whether the index is
// signed or not.
- const llvm::Type *IdxType =
- llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
+ const llvm::Type *IdxType = CGF.IntPtrTy;
if (IdxExp->getType()->isSignedIntegerType())
Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
else
@@ -1376,30 +1606,37 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
if (!isa<llvm::PointerType>(Ops.LHS->getType())) {
- if (CGF.getContext().getLangOptions().OverflowChecking
- && Ops.Ty->isSignedIntegerType())
- return EmitOverflowCheckedBinOp(Ops);
-
+ if (Ops.Ty->isSignedIntegerType()) {
+ switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWSub(Ops.LHS, Ops.RHS, "sub");
+ case LangOptions::SOB_Defined:
+ return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(Ops);
+ }
+ }
+
if (Ops.LHS->getType()->isFPOrFPVectorTy())
return Builder.CreateFSub(Ops.LHS, Ops.RHS, "sub");
- // Signed integer overflow is undefined behavior.
- if (Ops.Ty->isSignedIntegerType())
- return Builder.CreateNSWSub(Ops.LHS, Ops.RHS, "sub");
-
return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
}
- if (Ops.E->getLHS()->getType()->isPointerType() &&
- Ops.E->getLHS()->getType()->getAs<PointerType>()->isVariableArrayType()) {
+ // Must have binary (not unary) expr here. Unary pointer increment doesn't
+ // use this path.
+ const BinaryOperator *BinOp = cast<BinaryOperator>(Ops.E);
+
+ if (BinOp->getLHS()->getType()->isPointerType() &&
+ BinOp->getLHS()->getType()->getAs<PointerType>()->isVariableArrayType()) {
// The amount of the addition needs to account for the VLA size for
// ptr-int
// The amount of the division needs to account for the VLA size for
// ptr-ptr.
- CGF.ErrorUnsupported(Ops.E, "VLA pointer subtraction");
+ CGF.ErrorUnsupported(BinOp, "VLA pointer subtraction");
}
- const QualType LHSType = Ops.E->getLHS()->getType();
+ const QualType LHSType = BinOp->getLHS()->getType();
const QualType LHSElementType = LHSType->getPointeeType();
if (!isa<llvm::PointerType>(Ops.RHS->getType())) {
// pointer - int
@@ -1408,9 +1645,8 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
if (Width < CGF.LLVMPointerWidth) {
// Zero or sign extend the pointer value based on whether the index is
// signed or not.
- const llvm::Type *IdxType =
- llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
- if (Ops.E->getRHS()->getType()->isSignedIntegerType())
+ const llvm::Type *IdxType = CGF.IntPtrTy;
+ if (BinOp->getRHS()->getType()->isSignedIntegerType())
Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
else
Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
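A standalone sketch of why the extension kind follows the index's C type rather than the pointer: the same 32-bit pattern must move the pointer backward when signed and far forward when unsigned.

#include <cstdint>
#include <cstdio>

int main() {
  int32_t  SIdx = -1;          // p + SIdx must step back one element
  uint32_t UIdx = 0xFFFFFFFFu; // p + UIdx must step far forward
  std::printf("%lld %llu\n",
              (long long)(int64_t)SIdx,            // sext -> -1
              (unsigned long long)(uint64_t)UIdx); // zext -> 4294967295
}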
@@ -1615,17 +1851,25 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
// because the result is altered by the store, i.e., [C99 6.5.16p1]
// 'An assignment expression has the value of the left operand after
// the assignment...'.
- if (LHS.isBitField()) {
- if (!LHS.isVolatileQualified()) {
- CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(),
- &RHS);
- return RHS;
- } else
- CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType());
- } else
+ if (LHS.isBitField())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(),
+ &RHS);
+ else
CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS, E->getType());
+
+ // If the result is clearly ignored, return now.
if (Ignore)
return 0;
+
+ // Objective-C property assignment never reloads the value following a store.
+ if (LHS.isPropertyRef() || LHS.isKVCRef())
+ return RHS;
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LHS.isVolatileQualified())
+ return RHS;
+
+ // Otherwise, reload the value.
return EmitLoadOfLValue(LHS, E->getType());
}
@@ -1925,6 +2169,13 @@ Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
DstTy);
}
+
+llvm::Value *CodeGenFunction::
+EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+ return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
+}
+
LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
llvm::Value *V;
// object->isa or (*object).isa
@@ -1958,12 +2209,12 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
LValue CodeGenFunction::EmitCompoundAssignOperatorLValue(
const CompoundAssignOperator *E) {
ScalarExprEmitter Scalar(*this);
- Value *BitFieldResult = 0;
+ Value *Result = 0;
switch (E->getOpcode()) {
#define COMPOUND_OP(Op) \
case BinaryOperator::Op##Assign: \
return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
- BitFieldResult)
+ Result)
COMPOUND_OP(Mul);
COMPOUND_OP(Div);
COMPOUND_OP(Rem);
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
index 7c842a9..e735a61 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
@@ -90,11 +90,14 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
CallArgList Args;
EmitCallArgs(Args, E->getMethodDecl(), E->arg_begin(), E->arg_end());
+ QualType ResultType =
+ E->getMethodDecl() ? E->getMethodDecl()->getResultType() : E->getType();
+
if (isSuperMessage) {
// super is only valid in an Objective-C method
const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
- return Runtime.GenerateMessageSendSuper(*this, Return, E->getType(),
+ return Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
E->getSelector(),
OMD->getClassInterface(),
isCategoryImpl,
@@ -104,7 +107,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
E->getMethodDecl());
}
- return Runtime.GenerateMessageSend(*this, Return, E->getType(),
+ return Runtime.GenerateMessageSend(*this, Return, ResultType,
E->getSelector(),
Receiver, Args, OID,
E->getMethodDecl());
@@ -458,7 +461,7 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
LoadObjCSelf(), Ivar, 0);
const RecordType *RT = FieldType->getAs<RecordType>();
CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
- CXXDestructorDecl *Dtor = FieldClassDecl->getDestructor(getContext());
+ CXXDestructorDecl *Dtor = FieldClassDecl->getDestructor();
if (!Dtor->isTrivial()) {
if (Array) {
const llvm::Type *BasePtr = ConvertType(FieldType);
@@ -595,7 +598,8 @@ void CodeGenFunction::EmitObjCPropertySet(const Expr *Exp,
Args);
} else if (const ObjCImplicitSetterGetterRefExpr *E =
dyn_cast<ObjCImplicitSetterGetterRefExpr>(Exp)) {
- Selector S = E->getSetterMethod()->getSelector();
+ const ObjCMethodDecl *SetterMD = E->getSetterMethod();
+ Selector S = SetterMD->getSelector();
CallArgList Args;
llvm::Value *Receiver;
if (E->getInterfaceDecl()) {
@@ -606,7 +610,8 @@ void CodeGenFunction::EmitObjCPropertySet(const Expr *Exp,
return;
} else
Receiver = EmitScalarExpr(E->getBase());
- Args.push_back(std::make_pair(Src, E->getType()));
+ ObjCMethodDecl::param_iterator P = SetterMD->param_begin();
+ Args.push_back(std::make_pair(Src, (*P)->getType()));
CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
getContext().VoidTy, S,
Receiver,
@@ -778,8 +783,8 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
llvm::ConstantInt::get(UnsignedLongLTy, 1));
Builder.CreateStore(Counter, CounterPtr);
- llvm::BasicBlock *LoopEnd = createBasicBlock("loopend");
- llvm::BasicBlock *AfterBody = createBasicBlock("afterbody");
+ JumpDest LoopEnd = getJumpDestInCurrentScope("loopend");
+ JumpDest AfterBody = getJumpDestInCurrentScope("afterbody");
BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
@@ -787,7 +792,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
BreakContinueStack.pop_back();
- EmitBlock(AfterBody);
+ EmitBlock(AfterBody.Block);
llvm::BasicBlock *FetchMore = createBasicBlock("fetchmore");
@@ -823,11 +828,11 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
LV.getAddress());
}
- EmitBlock(LoopEnd);
+ EmitBlock(LoopEnd.Block);
}
void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
- CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S);
+ CGM.getObjCRuntime().EmitTryStmt(*this, S);
}
void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
@@ -836,7 +841,9 @@ void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
void CodeGenFunction::EmitObjCAtSynchronizedStmt(
const ObjCAtSynchronizedStmt &S) {
- CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S);
+ CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S);
}
CGObjCRuntime::~CGObjCRuntime() {}
+
+
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
index 6c25afe..f3c80bc 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -17,6 +17,7 @@
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenFunction.h"
+#include "CGException.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
@@ -162,7 +163,8 @@ public:
const ObjCMethodDecl *Method);
virtual llvm::Value *GetClass(CGBuilderTy &Builder,
const ObjCInterfaceDecl *OID);
- virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel);
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval = false);
virtual llvm::Value *GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
*Method);
@@ -179,8 +181,10 @@ public:
virtual llvm::Function *GetCopyStructFunction();
virtual llvm::Constant *EnumerationMutationFunction();
- virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
- const Stmt &S);
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S);
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S);
virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtThrowStmt &S);
virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
@@ -197,7 +201,7 @@ public:
virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
llvm::Value *DestPtr,
llvm::Value *SrcPtr,
- QualType Ty);
+ llvm::Value *Size);
virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
QualType ObjectTy,
llvm::Value *BaseValue,
@@ -360,14 +364,16 @@ llvm::Value *CGObjCGNU::GetClass(CGBuilderTy &Builder,
return Builder.CreateCall(ClassLookupFn, ClassName);
}
-llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel) {
+llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval) {
llvm::GlobalAlias *&US = UntypedSelectors[Sel.getAsString()];
if (US == 0)
US = new llvm::GlobalAlias(llvm::PointerType::getUnqual(SelectorTy),
llvm::GlobalValue::PrivateLinkage,
".objc_untyped_selector_alias"+Sel.getAsString(),
NULL, &TheModule);
-
+ if (lval)
+ return US;
return Builder.CreateLoad(US);
}
@@ -624,8 +630,8 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
// to be on the stack / in those registers at the time) on most platforms,
// and generates a SegV on SPARC. With LLVM it corrupts the stack.
bool isPointerSizedReturn = false;
- if (ResultType->isAnyPointerType() || ResultType->isIntegralType() ||
- ResultType->isVoidType())
+ if (ResultType->isAnyPointerType() ||
+ ResultType->isIntegralOrEnumerationType() || ResultType->isVoidType())
isPointerSizedReturn = true;
llvm::BasicBlock *startBB = 0;
@@ -1848,245 +1854,167 @@ llvm::Constant *CGObjCGNU::EnumerationMutationFunction() {
return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
}
-void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
- const Stmt &S) {
- // Pointer to the personality function
- llvm::Constant *Personality =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext),
- true),
- "__gnu_objc_personality_v0");
- Personality = llvm::ConstantExpr::getBitCast(Personality, PtrTy);
- std::vector<const llvm::Type*> Params;
- Params.push_back(PtrTy);
- llvm::Value *RethrowFn =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- Params, false), "_Unwind_Resume");
-
- bool isTry = isa<ObjCAtTryStmt>(S);
- llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
- llvm::BasicBlock *PrevLandingPad = CGF.getInvokeDest();
- llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
- llvm::BasicBlock *CatchInCatch = CGF.createBasicBlock("catch.rethrow");
- llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally");
- llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw");
- llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end");
-
- // @synchronized()
- if (!isTry) {
- std::vector<const llvm::Type*> Args(1, IdTy);
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
- llvm::Value *SyncEnter = CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
- llvm::Value *SyncArg =
- CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
- SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
- CGF.Builder.CreateCall(SyncEnter, SyncArg);
- }
+void CGObjCGNU::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) {
+ std::vector<const llvm::Type*> Args(1, IdTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ // Evaluate the lock operand. This should dominate the cleanup.
+ llvm::Value *SyncArg =
+ CGF.EmitScalarExpr(S.getSynchExpr());
- // Push an EH context entry, used for handling rethrows and jumps
- // through finally.
- CGF.PushCleanupBlock(FinallyBlock);
-
- // Emit the statements in the @try {} block
- CGF.setInvokeDest(TryHandler);
-
- CGF.EmitBlock(TryBlock);
- CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
- : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
-
- // Jump to @finally if there is no exception
- CGF.EmitBranchThroughCleanup(FinallyEnd);
-
- // Emit the handlers
- CGF.EmitBlock(TryHandler);
-
- // Get the correct versions of the exception handling intrinsics
- llvm::Value *llvm_eh_exception =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *llvm_eh_selector =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
- llvm::Value *llvm_eh_typeid_for =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
-
- // Exception object
- llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
- llvm::Value *RethrowPtr = CGF.CreateTempAlloca(Exc->getType(), "_rethrow");
-
- llvm::SmallVector<llvm::Value*, 8> ESelArgs;
- llvm::SmallVector<std::pair<const VarDecl*, const Stmt*>, 8> Handlers;
-
- ESelArgs.push_back(Exc);
- ESelArgs.push_back(Personality);
-
- bool HasCatchAll = false;
- // Only @try blocks are allowed @catch blocks, but both can have @finally
- if (isTry) {
- if (cast<ObjCAtTryStmt>(S).getNumCatchStmts()) {
- const ObjCAtTryStmt &AtTry = cast<ObjCAtTryStmt>(S);
- CGF.setInvokeDest(CatchInCatch);
-
- for (unsigned I = 0, N = AtTry.getNumCatchStmts(); I != N; ++I) {
- const ObjCAtCatchStmt *CatchStmt = AtTry.getCatchStmt(I);
- const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
- Handlers.push_back(std::make_pair(CatchDecl,
- CatchStmt->getCatchBody()));
-
- // @catch() and @catch(id) both catch any ObjC exception
- if (!CatchDecl || CatchDecl->getType()->isObjCIdType()
- || CatchDecl->getType()->isObjCQualifiedIdType()) {
- // Use i8* null here to signal this is a catch all, not a cleanup.
- ESelArgs.push_back(NULLPtr);
- HasCatchAll = true;
- // No further catches after this one will ever by reached
- break;
- }
-
- // All other types should be Objective-C interface pointer types.
- const ObjCObjectPointerType *OPT =
- CatchDecl->getType()->getAs<ObjCObjectPointerType>();
- assert(OPT && "Invalid @catch type.");
- const ObjCInterfaceDecl *IDecl =
- OPT->getObjectType()->getInterface();
- assert(IDecl && "Invalid @catch type.");
- llvm::Value *EHType =
- MakeConstantString(IDecl->getNameAsString());
- ESelArgs.push_back(EHType);
- }
- }
- }
+ // Acquire the lock.
+ llvm::Value *SyncEnter = CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
+ CGF.Builder.CreateCall(SyncEnter, SyncArg);
- // We use a cleanup unless there was already a catch all.
- if (!HasCatchAll) {
- ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0));
- Handlers.push_back(std::make_pair((const ParmVarDecl*) 0, (const Stmt*) 0));
+ // Register an all-paths cleanup to release the lock.
+ {
+ CodeGenFunction::CleanupBlock ReleaseScope(CGF, NormalAndEHCleanup);
+
+ llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
+ CGF.Builder.CreateCall(SyncExit, SyncArg);
}
- // Find which handler was matched.
- llvm::Value *ESelector = CGF.Builder.CreateCall(llvm_eh_selector,
- ESelArgs.begin(), ESelArgs.end(), "selector");
+ // Emit the body of the statement.
+ CGF.EmitStmt(S.getSynchBody());
- for (unsigned i = 0, e = Handlers.size(); i != e; ++i) {
- const VarDecl *CatchParam = Handlers[i].first;
- const Stmt *CatchBody = Handlers[i].second;
+ // Pop the lock-release cleanup.
+ CGF.PopCleanupBlock();
+}
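A C++ RAII analogy for the NormalAndEHCleanup pushed above (syncEnter/syncExit are stand-in stubs for the runtime's objc_sync_enter/objc_sync_exit):

#include <cstdio>

static void syncEnter(void *Obj) { std::printf("enter %p\n", Obj); }
static void syncExit(void *Obj)  { std::printf("exit %p\n", Obj); }

// Evaluate the lock operand once, acquire, and guarantee release on
// fall-through, early return, and unwind alike -- the all-paths cleanup.
struct SyncScope {
  void *Obj;
  explicit SyncScope(void *O) : Obj(O) { syncEnter(Obj); }
  ~SyncScope() { syncExit(Obj); }
};

int main() {
  int Lock;
  {
    SyncScope S(&Lock); // @synchronized(&Lock) {
    std::printf("body\n");
  }                     // } -- syncExit runs here even on early exits
}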
- llvm::BasicBlock *Next = 0;
+namespace {
+ struct CatchHandler {
+ const VarDecl *Variable;
+ const Stmt *Body;
+ llvm::BasicBlock *Block;
+ llvm::Value *TypeInfo;
+ };
+}
- // The last handler always matches.
- if (i + 1 != e) {
- assert(CatchParam && "Only last handler can be a catch all.");
+void CGObjCGNU::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) {
+  // Unlike the Apple non-fragile runtimes, which also use
+  // unwind-based zero-cost exceptions, the GNU Objective-C runtime's
+ // EH support isn't a veneer over C++ EH. Instead, exception
+ // objects are created by __objc_exception_throw and destroyed by
+ // the personality function; this avoids the need for bracketing
+ // catch handlers with calls to __blah_begin_catch/__blah_end_catch
+ // (or even _Unwind_DeleteException), but probably doesn't
+ // interoperate very well with foreign exceptions.
+
+ // Jump destination for falling out of catch bodies.
+ CodeGenFunction::JumpDest Cont;
+ if (S.getNumCatchStmts())
+ Cont = CGF.getJumpDestInCurrentScope("eh.cont");
+
+ // We handle @finally statements by pushing them as a cleanup
+ // before entering the catch.
+ CodeGenFunction::FinallyInfo FinallyInfo;
+ if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt()) {
+ std::vector<const llvm::Type*> Args(1, IdTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ llvm::Constant *Rethrow =
+ CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
- // Test whether this block matches the type for the selector and branch
- // to Match if it does, or to the next BB if it doesn't.
- llvm::BasicBlock *Match = CGF.createBasicBlock("match");
- Next = CGF.createBasicBlock("catch.next");
- llvm::Value *Id = CGF.Builder.CreateCall(llvm_eh_typeid_for,
- CGF.Builder.CreateBitCast(ESelArgs[i+2], PtrTy));
- CGF.Builder.CreateCondBr(CGF.Builder.CreateICmpEQ(ESelector, Id), Match,
- Next);
+ FinallyInfo = CGF.EnterFinallyBlock(Finally->getFinallyBody(), 0, 0,
+ Rethrow);
+ }
- CGF.EmitBlock(Match);
- }
+ llvm::SmallVector<CatchHandler, 8> Handlers;
- if (CatchBody) {
- llvm::Value *ExcObject = CGF.Builder.CreateBitCast(Exc,
- CGF.ConvertType(CatchParam->getType()));
-
- // Bind the catch parameter if it exists.
- if (CatchParam) {
- // CatchParam is a ParmVarDecl because of the grammar
- // construction used to handle this, but for codegen purposes
- // we treat this as a local decl.
- CGF.EmitLocalBlockVarDecl(*CatchParam);
- CGF.Builder.CreateStore(ExcObject, CGF.GetAddrOfLocalVar(CatchParam));
- }
+ // Enter the catch, if there is one.
+ if (S.getNumCatchStmts()) {
+ for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
+ const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
- CGF.ObjCEHValueStack.push_back(ExcObject);
- CGF.EmitStmt(CatchBody);
- CGF.ObjCEHValueStack.pop_back();
+ Handlers.push_back(CatchHandler());
+ CatchHandler &Handler = Handlers.back();
+ Handler.Variable = CatchDecl;
+ Handler.Body = CatchStmt->getCatchBody();
+ Handler.Block = CGF.createBasicBlock("catch");
- CGF.EmitBranchThroughCleanup(FinallyEnd);
+ // @catch() and @catch(id) both catch any ObjC exception.
+ // Treat them as catch-alls.
+ // FIXME: this is what this code was doing before, but should 'id'
+ // really be catching foreign exceptions?
+ if (!CatchDecl
+ || CatchDecl->getType()->isObjCIdType()
+ || CatchDecl->getType()->isObjCQualifiedIdType()) {
- if (Next)
- CGF.EmitBlock(Next);
- } else {
- assert(!Next && "catchup should be last handler.");
+ Handler.TypeInfo = 0; // catch-all
+
+ // Don't consider any other catches.
+ break;
+ }
- CGF.Builder.CreateStore(Exc, RethrowPtr);
- CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ // All other types should be Objective-C interface pointer types.
+ const ObjCObjectPointerType *OPT =
+ CatchDecl->getType()->getAs<ObjCObjectPointerType>();
+ assert(OPT && "Invalid @catch type.");
+ const ObjCInterfaceDecl *IDecl =
+ OPT->getObjectType()->getInterface();
+ assert(IDecl && "Invalid @catch type.");
+ Handler.TypeInfo = MakeConstantString(IDecl->getNameAsString());
}
+
+ EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
+ Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
}
- // The @finally block is a secondary landing pad for any exceptions thrown in
- // @catch() blocks
- CGF.EmitBlock(CatchInCatch);
- Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
- ESelArgs.clear();
- ESelArgs.push_back(Exc);
- ESelArgs.push_back(Personality);
- // If there is a @catch or @finally clause in outside of this one then we
- // need to make sure that we catch and rethrow it.
- if (PrevLandingPad) {
- ESelArgs.push_back(NULLPtr);
- } else {
- ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0));
- }
- CGF.Builder.CreateCall(llvm_eh_selector, ESelArgs.begin(), ESelArgs.end(),
- "selector");
- CGF.Builder.CreateCall(llvm_eh_typeid_for,
- CGF.Builder.CreateIntToPtr(ESelArgs[2], PtrTy));
- CGF.Builder.CreateStore(Exc, RethrowPtr);
- CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+ // Emit the try body.
+ CGF.EmitStmt(S.getTryBody());
- CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
+ // Leave the try.
+ if (S.getNumCatchStmts())
+ CGF.EHStack.popCatch();
- CGF.setInvokeDest(PrevLandingPad);
+ // Remember where we were.
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
- CGF.EmitBlock(FinallyBlock);
+ // Emit the handlers.
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
+ CatchHandler &Handler = Handlers[I];
+ CGF.EmitBlock(Handler.Block);
+ llvm::Value *Exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot());
- if (isTry) {
- if (const ObjCAtFinallyStmt* FinallyStmt =
- cast<ObjCAtTryStmt>(S).getFinallyStmt())
- CGF.EmitStmt(FinallyStmt->getFinallyBody());
- } else {
- // Emit 'objc_sync_exit(expr)' as finally's sole statement for
- // @synchronized.
- std::vector<const llvm::Type*> Args(1, IdTy);
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
- llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
- llvm::Value *SyncArg =
- CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
- SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
- CGF.Builder.CreateCall(SyncExit, SyncArg);
- }
+ // Bind the catch parameter if it exists.
+ if (const VarDecl *CatchParam = Handler.Variable) {
+ const llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
+ Exn = CGF.Builder.CreateBitCast(Exn, CatchType);
- if (Info.SwitchBlock)
- CGF.EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- CGF.EmitBlock(Info.EndBlock);
+ CGF.EmitLocalBlockVarDecl(*CatchParam);
+ CGF.Builder.CreateStore(Exn, CGF.GetAddrOfLocalVar(CatchParam));
+ }
- // Branch around the rethrow code.
- CGF.EmitBranch(FinallyEnd);
+ CGF.ObjCEHValueStack.push_back(Exn);
+ CGF.EmitStmt(Handler.Body);
+ CGF.ObjCEHValueStack.pop_back();
- CGF.EmitBlock(FinallyRethrow);
+ CGF.EmitBranchThroughCleanup(Cont);
+ }
- llvm::Value *ExceptionObject = CGF.Builder.CreateLoad(RethrowPtr);
- llvm::BasicBlock *UnwindBB = CGF.getInvokeDest();
- if (!UnwindBB) {
- CGF.Builder.CreateCall(RethrowFn, ExceptionObject);
- // Exception always thrown, next instruction is never reached.
- CGF.Builder.CreateUnreachable();
- } else {
- // If there is a @catch block outside this scope, we invoke instead of
- // calling because we may return to this function. This is very slow, but
- // some people still do it. It would be nice to add an optimised path for
- // this.
- CGF.Builder.CreateInvoke(RethrowFn, UnwindBB, UnwindBB, &ExceptionObject,
- &ExceptionObject+1);
- }
+ // Go back to the try-statement fallthrough.
+ CGF.Builder.restoreIP(SavedIP);
+
+ // Pop out of the finally.
+ if (S.getFinallyStmt())
+ CGF.ExitFinallyBlock(FinallyInfo);
- CGF.EmitBlock(FinallyEnd);
+ if (Cont.Block) {
+ if (Cont.Block->use_empty())
+ delete Cont.Block;
+ else {
+ CGF.EmitBranch(Cont.Block);
+ CGF.EmitBlock(Cont.Block);
+ }
+ }
}
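The handler table built above can be modeled as a first-match scan in which a null TypeInfo is a catch-all that ends the search, mirroring the break after @catch()/@catch(id). A minimal sketch (string comparison stands in for the runtime's class matching):

#include <cassert>
#include <cstring>
#include <vector>

struct Handler { const char *TypeInfo; int Index; };

int selectHandler(const std::vector<Handler> &Handlers, const char *ExnClass) {
  for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
    const Handler &H = Handlers[I];
    if (!H.TypeInfo || std::strcmp(H.TypeInfo, ExnClass) == 0)
      return H.Index; // enter this catch block
  }
  return -1;          // no match: keep unwinding (into @finally, if any)
}

int main() {
  std::vector<Handler> Hs;
  Hs.push_back(Handler()); Hs[0].TypeInfo = "NSException"; Hs[0].Index = 0;
  Hs.push_back(Handler()); Hs[1].TypeInfo = 0;             Hs[1].Index = 1;
  assert(selectHandler(Hs, "NSException") == 0);
  assert(selectHandler(Hs, "Whatever") == 1); // falls into the catch-all
}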
void CGObjCGNU::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
@@ -2174,17 +2102,12 @@ void CGObjCGNU::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
void CGObjCGNU::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
llvm::Value *DestPtr,
llvm::Value *SrcPtr,
- QualType Ty) {
+ llvm::Value *Size) {
CGBuilderTy B = CGF.Builder;
DestPtr = EnforceType(B, DestPtr, IdTy);
SrcPtr = EnforceType(B, SrcPtr, PtrToIdTy);
- std::pair<uint64_t, unsigned> TypeInfo = CGM.getContext().getTypeInfo(Ty);
- unsigned long size = TypeInfo.first/8;
- // FIXME: size_t
- llvm::Value *N = llvm::ConstantInt::get(LongTy, size);
-
- B.CreateCall3(MemMoveFn, DestPtr, SrcPtr, N);
+ B.CreateCall3(MemMoveFn, DestPtr, SrcPtr, Size);
}
llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
index d3bafd7..01ead9e 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
@@ -16,13 +16,14 @@
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "CodeGenFunction.h"
+#include "CGException.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/LangOptions.h"
-#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
@@ -31,6 +32,7 @@
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include <cstdio>
@@ -440,6 +442,15 @@ public:
return CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
}
+ /// ExceptionRethrowFn - LLVM objc_exception_rethrow function.
+ llvm::Constant *getExceptionRethrowFn() {
+ // void objc_exception_rethrow(void)
+ std::vector<const llvm::Type*> Args;
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, true);
+ return CGM.CreateRuntimeFunction(FTy, "objc_exception_rethrow");
+ }
+
/// SyncEnterFn - LLVM object_sync_enter function.
llvm::Constant *getSyncEnterFn() {
// void objc_sync_enter (id)
@@ -843,6 +854,9 @@ protected:
/// MethodVarNames - uniqued method variable names.
llvm::DenseMap<Selector, llvm::GlobalVariable*> MethodVarNames;
+ /// DefinedCategoryNames - list of category names in form Class_Category.
+ llvm::SetVector<std::string> DefinedCategoryNames;
+
/// MethodVarTypes - uniqued method type signatures. We have to use
/// a StringMap here because have no other unique reference.
llvm::StringMap<llvm::GlobalVariable*> MethodVarTypes;
@@ -1120,7 +1134,8 @@ private:
/// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
/// for the given selector.
- llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel);
+ llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval=false);
public:
CGObjCMac(CodeGen::CodeGenModule &cgm);
@@ -1151,7 +1166,8 @@ public:
virtual llvm::Value *GetClass(CGBuilderTy &Builder,
const ObjCInterfaceDecl *ID);
- virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel);
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval = false);
/// The NeXT/Apple runtimes do not support typed selectors; just emit an
/// untyped one.
@@ -1170,8 +1186,11 @@ public:
virtual llvm::Constant *GetCopyStructFunction();
virtual llvm::Constant *EnumerationMutationFunction();
- virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
- const Stmt &S);
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S);
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S);
+ void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, const Stmt &S);
virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtThrowStmt &S);
virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
@@ -1187,7 +1206,7 @@ public:
llvm::Value *src, llvm::Value *dest);
virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
llvm::Value *dest, llvm::Value *src,
- QualType Ty);
+ llvm::Value *size);
virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
QualType ObjectTy,
@@ -1319,7 +1338,8 @@ private:
/// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
/// for the given selector.
- llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel);
+ llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval=false);
/// GetInterfaceEHType - Get the cached ehtype for the given Objective-C
/// interface. The return value has type EHTypePtrTy.
@@ -1382,8 +1402,9 @@ public:
virtual llvm::Value *GetClass(CGBuilderTy &Builder,
const ObjCInterfaceDecl *ID);
- virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel)
- { return EmitSelector(Builder, Sel); }
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lvalue = false)
+ { return EmitSelector(Builder, Sel, lvalue); }
/// The NeXT/Apple runtimes do not support typed selectors; just emit an
/// untyped one.
@@ -1412,8 +1433,10 @@ public:
return ObjCTypes.getEnumerationMutationFn();
}
- virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
- const Stmt &S);
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S);
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S);
virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtThrowStmt &S);
virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
@@ -1429,7 +1452,7 @@ public:
llvm::Value *src, llvm::Value *dest);
virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
llvm::Value *dest, llvm::Value *src,
- QualType Ty);
+ llvm::Value *size);
virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
QualType ObjectTy,
llvm::Value *BaseValue,
@@ -1483,8 +1506,9 @@ llvm::Value *CGObjCMac::GetClass(CGBuilderTy &Builder,
}
/// GetSelector - Return the pointer to the unique'd string for this selector.
-llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, Selector Sel) {
- return EmitSelector(Builder, Sel);
+llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval) {
+ return EmitSelector(Builder, Sel, lval);
}
llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
*Method) {
@@ -1620,30 +1644,23 @@ CGObjCCommonMac::EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
const llvm::FunctionType *FTy =
Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
+ if (Method)
+ assert(CGM.getContext().getCanonicalType(Method->getResultType()) ==
+ CGM.getContext().getCanonicalType(ResultType) &&
+ "Result type mismatch!");
+
llvm::Constant *Fn = NULL;
- if (CGM.ReturnTypeUsesSret(FnInfo)) {
+ if (CGM.ReturnTypeUsesSRet(FnInfo)) {
Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper)
: ObjCTypes.getSendStretFn(IsSuper);
- } else if (ResultType->isFloatingType()) {
- if (ObjCABI == 2) {
- if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
- BuiltinType::Kind k = BT->getKind();
- Fn = (k == BuiltinType::LongDouble) ? ObjCTypes.getSendFpretFn2(IsSuper)
- : ObjCTypes.getSendFn2(IsSuper);
- } else {
- Fn = ObjCTypes.getSendFn2(IsSuper);
- }
- } else
- // FIXME. This currently matches gcc's API for x86-32. May need to change
- // for others if we have their API.
- Fn = ObjCTypes.getSendFpretFn(IsSuper);
+ } else if (CGM.ReturnTypeUsesFPRet(ResultType)) {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendFpretFn2(IsSuper)
+ : ObjCTypes.getSendFpretFn(IsSuper);
} else {
Fn = (ObjCABI == 2) ? ObjCTypes.getSendFn2(IsSuper)
: ObjCTypes.getSendFn(IsSuper);
}
- assert(Fn && "EmitLegacyMessageSend - unknown API");
- Fn = llvm::ConstantExpr::getBitCast(Fn,
- llvm::PointerType::getUnqual(FTy));
+ Fn = llvm::ConstantExpr::getBitCast(Fn, llvm::PointerType::getUnqual(FTy));
return CGF.EmitCall(FnInfo, Fn, Return, ActualArgs);
}
@@ -1909,10 +1926,18 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(llvm::Twine Name,
Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy,
Prop));
}
- if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD))
+ if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD)) {
for (ObjCInterfaceDecl::protocol_iterator P = OID->protocol_begin(),
E = OID->protocol_end(); P != E; ++P)
- PushProtocolProperties(PropertySet, Properties, Container, (*P), ObjCTypes);
+ PushProtocolProperties(PropertySet, Properties, Container, (*P),
+ ObjCTypes);
+ }
+ else if (const ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(OCD)) {
+ for (ObjCCategoryDecl::protocol_iterator P = CD->protocol_begin(),
+ E = CD->protocol_end(); P != E; ++P)
+ PushProtocolProperties(PropertySet, Properties, Container, (*P),
+ ObjCTypes);
+ }
// Return null for empty list.
if (Properties.empty())
@@ -2049,6 +2074,7 @@ void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
"__OBJC,__category,regular,no_dead_strip",
4, true);
DefinedCategories.push_back(GV);
+ DefinedCategoryNames.insert(ExtName.str());
}
// FIXME: Get from somewhere?
@@ -2494,11 +2520,52 @@ llvm::Constant *CGObjCMac::EnumerationMutationFunction() {
return ObjCTypes.getEnumerationMutationFn();
}
+void CGObjCMac::EmitTryStmt(CodeGenFunction &CGF, const ObjCAtTryStmt &S) {
+ return EmitTryOrSynchronizedStmt(CGF, S);
+}
+
+void CGObjCMac::EmitSynchronizedStmt(CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) {
+ return EmitTryOrSynchronizedStmt(CGF, S);
+}
+
/*
Objective-C setjmp-longjmp (sjlj) Exception Handling
--
+ A catch buffer is a setjmp buffer plus:
+ - a pointer to the exception that was caught
+ - a pointer to the previous exception data buffer
+ - two pointers of reserved storage
+ Therefore catch buffers form a stack, with a pointer to the top
+ of the stack kept in thread-local storage.
+
+ objc_exception_try_enter pushes a catch buffer onto the EH stack.
+ objc_exception_try_exit pops the given catch buffer, which is
+ required to be the top of the EH stack.
+ objc_exception_throw pops the top of the EH stack, writes the
+ thrown exception into the appropriate field, and longjmps
+ to the setjmp buffer. It crashes the process (with a printf
+ and an abort()) if there are no catch buffers on the stack.
+ objc_exception_extract just reads the exception pointer out of the
+ catch buffer.
+
+ There's no reason an implementation couldn't use a light-weight
+ setjmp here --- something like __builtin_setjmp, but API-compatible
+ with the heavyweight setjmp. This will be more important if we ever
+ want to implement correct ObjC/C++ exception interactions for the
+ fragile ABI.
+
+ Note that for this use of setjmp/longjmp to be correct, we may need
+ to mark some local variables volatile: if a non-volatile local
+ variable is modified between the setjmp and the longjmp, it has
+ indeterminate value. For the purposes of LLVM IR, it may be
+ sufficient to make loads and stores within the @try (to variables
+ declared outside the @try) volatile. This is necessary for
+ optimized correctness, but is not currently being done; this is
+ being tracked as rdar://problem/8160285
+
The basic framework for a @try-catch-finally is as follows:
{
objc_exception_data d;
@@ -2560,37 +2627,33 @@ llvm::Constant *CGObjCMac::EnumerationMutationFunction() {
Rethrows and Jumps-Through-Finally
--
- Support for implicit rethrows and jumping through the finally block is
- handled by storing the current exception-handling context in
- ObjCEHStack.
-
- In order to implement proper @finally semantics, we support one basic
- mechanism for jumping through the finally block to an arbitrary
- destination. Constructs which generate exits from a @try or @catch
- block use this mechanism to implement the proper semantics by chaining
- jumps, as necessary.
-
- This mechanism works like the one used for indirect goto: we
- arbitrarily assign an ID to each destination and store the ID for the
- destination in a variable prior to entering the finally block. At the
- end of the finally block we simply create a switch to the proper
- destination.
-
- Code gen for @synchronized(expr) stmt;
- Effectively generating code for:
- objc_sync_enter(expr);
- @try stmt @finally { objc_sync_exit(expr); }
+ '@throw;' is supported by pushing the currently-caught exception
+ onto ObjCEHStack while the @catch blocks are emitted.
+
+ Branches through the @finally block are handled with an ordinary
+ normal cleanup. We do not register an EH cleanup; fragile-ABI ObjC
+ exceptions are not compatible with C++ exceptions, and this is
+ hardly the only place where this will go wrong.
+
+ @synchronized(expr) { stmt; } is emitted as if it were:
+ id synch_value = expr;
+ objc_sync_enter(synch_value);
+ @try { stmt; } @finally { objc_sync_exit(synch_value); }
*/
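
[Annotation] The catch-buffer protocol in the comment above is easy to misread, so here is a small stand-alone C++ model of it. Every name below (ExceptionData, tryEnter, throwExn, extract) is an illustrative stand-in, not the fragile runtime's real symbols or struct layout; it only demonstrates the push/pop/longjmp discipline the comment describes.

    #include <csetjmp>
    #include <cstdio>
    #include <cstdlib>

    struct ExceptionData {          // "a setjmp buffer plus..."
      std::jmp_buf buf;
      void *caughtException;        // the exception that was caught
      ExceptionData *prev;          // the previous exception data buffer
      void *reserved[2];            // two pointers of reserved storage
    };

    static thread_local ExceptionData *ehTop = nullptr;  // top of the stack

    static void tryEnter(ExceptionData *d) {  // objc_exception_try_enter
      d->prev = ehTop;
      ehTop = d;
    }

    static void tryExit(ExceptionData *d) {   // objc_exception_try_exit
      ehTop = d->prev;                        // d must be the top
    }

    [[noreturn]] static void throwExn(void *exn) {  // objc_exception_throw
      ExceptionData *d = ehTop;
      if (!d) {                     // no catch buffers: printf and abort
        std::fprintf(stderr, "uncaught exception %p\n", exn);
        std::abort();
      }
      ehTop = d->prev;              // pop the top of the EH stack
      d->caughtException = exn;     // write the thrown exception
      std::longjmp(d->buf, 1);      // longjmp to the setjmp buffer
    }

    static void *extract(ExceptionData *d) {  // objc_exception_extract
      return d->caughtException;
    }

    int main() {
      static ExceptionData d;       // static: immune to setjmp clobber rules
      static int payload = 42;
      tryEnter(&d);
      if (setjmp(d.buf) == 0) {     // enter the protected block
        throwExn(&payload);         // models @throw
      } else {                      // the handler edge after longjmp
        std::printf("caught %d\n", *static_cast<int *>(extract(&d)));
      }
      tryExit(&d);                  // unused here once a throw happened
      return 0;
    }
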
void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
const Stmt &S) {
bool isTry = isa<ObjCAtTryStmt>(S);
- // Create various blocks we refer to for handling @finally.
- llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally");
- llvm::BasicBlock *FinallyExit = CGF.createBasicBlock("finally.exit");
- llvm::BasicBlock *FinallyNoExit = CGF.createBasicBlock("finally.noexit");
- llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw");
- llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end");
+
+ // A destination for the fall-through edges of the catch handlers to
+ // jump to.
+ CodeGenFunction::JumpDest FinallyEnd =
+ CGF.getJumpDestInCurrentScope("finally.end");
+
+ // A destination for the rethrow edge of the catch handlers to jump
+ // to.
+ CodeGenFunction::JumpDest FinallyRethrow =
+ CGF.getJumpDestInCurrentScope("finally.rethrow");
// For @synchronized, call objc_sync_enter(sync.expr). The
// evaluation of the expression must occur before we enter the
@@ -2601,75 +2664,139 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
SyncArg =
CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
- CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg);
+ CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg)
+ ->setDoesNotThrow();
}
- // Push an EH context entry, used for handling rethrows and jumps
- // through finally.
- CGF.PushCleanupBlock(FinallyBlock);
-
- if (CGF.ObjCEHValueStack.empty())
- CGF.ObjCEHValueStack.push_back(0);
- // If This is a nested @try, caught exception is that of enclosing @try.
- else
- CGF.ObjCEHValueStack.push_back(CGF.ObjCEHValueStack.back());
// Allocate memory for the exception data and rethrow pointer.
llvm::Value *ExceptionData = CGF.CreateTempAlloca(ObjCTypes.ExceptionDataTy,
"exceptiondata.ptr");
llvm::Value *RethrowPtr = CGF.CreateTempAlloca(ObjCTypes.ObjectPtrTy,
"_rethrow");
- llvm::Value *CallTryExitPtr = CGF.CreateTempAlloca(
- llvm::Type::getInt1Ty(VMContext),
+
+ // Create a flag indicating whether the cleanup needs to call
+ // objc_exception_try_exit. This is true except when
+ // - no catches match and we're branching through the cleanup
+ // just to rethrow the exception, or
+ // - a catch matched and we're falling out of the catch handler.
+ llvm::Value *CallTryExitVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(),
"_call_try_exit");
CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(VMContext),
- CallTryExitPtr);
+ CallTryExitVar);
+
+ // Push a normal cleanup to leave the try scope.
+ {
+ CodeGenFunction::CleanupBlock FinallyScope(CGF, NormalCleanup);
+
+ // Check whether we need to call objc_exception_try_exit.
+ // In optimized code, this branch will always be folded.
+ llvm::BasicBlock *FinallyCallExit =
+ CGF.createBasicBlock("finally.call_exit");
+ llvm::BasicBlock *FinallyNoCallExit =
+ CGF.createBasicBlock("finally.no_call_exit");
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateLoad(CallTryExitVar),
+ FinallyCallExit, FinallyNoCallExit);
+
+ CGF.EmitBlock(FinallyCallExit);
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryExitFn(), ExceptionData)
+ ->setDoesNotThrow();
+
+ CGF.EmitBlock(FinallyNoCallExit);
+
+ if (isTry) {
+ if (const ObjCAtFinallyStmt* FinallyStmt =
+ cast<ObjCAtTryStmt>(S).getFinallyStmt())
+ CGF.EmitStmt(FinallyStmt->getFinallyBody());
+
+ // ~CleanupBlock requires there to be an exit block.
+ CGF.EnsureInsertPoint();
+ } else {
+ // Emit objc_sync_exit(expr); as finally's sole statement for
+ // @synchronized.
+ CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg)
+ ->setDoesNotThrow();
+ }
+ }
- // Enter a new try block and call setjmp.
- CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData);
- llvm::Value *JmpBufPtr = CGF.Builder.CreateStructGEP(ExceptionData, 0,
- "jmpbufarray");
- JmpBufPtr = CGF.Builder.CreateStructGEP(JmpBufPtr, 0, "tmp");
- llvm::Value *SetJmpResult = CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(),
- JmpBufPtr, "result");
+ // Enter a try block:
+ // - Call objc_exception_try_enter to push ExceptionData on top of
+ // the EH stack.
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData)
+ ->setDoesNotThrow();
+ // - Call setjmp on the exception data buffer.
+ llvm::Constant *Zero = llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0);
+ llvm::Value *GEPIndexes[] = { Zero, Zero, Zero };
+ llvm::Value *SetJmpBuffer =
+ CGF.Builder.CreateGEP(ExceptionData, GEPIndexes, GEPIndexes+3, "setjmp_buffer");
+ llvm::CallInst *SetJmpResult =
+ CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer, "setjmp_result");
+ SetJmpResult->setDoesNotThrow();
+
+ // If setjmp returned 0, enter the protected block; otherwise,
+ // branch to the handler.
llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
- CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(SetJmpResult, "threw"),
- TryHandler, TryBlock);
+ llvm::Value *DidCatch =
+ CGF.Builder.CreateIsNull(SetJmpResult, "did_catch_exception");
+ CGF.Builder.CreateCondBr(DidCatch, TryBlock, TryHandler);
- // Emit the @try block.
+ // Emit the protected block.
CGF.EmitBlock(TryBlock);
CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
- : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
+ : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
CGF.EmitBranchThroughCleanup(FinallyEnd);
- // Emit the "exception in @try" block.
+ // Emit the exception handler block.
CGF.EmitBlock(TryHandler);
// Retrieve the exception object. We may emit multiple blocks but
// nothing can cross this so the value is already in SSA form.
- llvm::Value *Caught =
+ llvm::CallInst *Caught =
CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
ExceptionData, "caught");
- CGF.ObjCEHValueStack.back() = Caught;
- if (!isTry) {
- CGF.Builder.CreateStore(Caught, RethrowPtr);
+ Caught->setDoesNotThrow();
+
+ // Remember the exception to rethrow.
+ CGF.Builder.CreateStore(Caught, RethrowPtr);
+
+ // Note: at this point, objc_exception_throw already popped the
+ // catch handler, so anything that branches to the cleanup needs
+ // to set CallTryExitVar to false.
+
+ // For a @synchronized (or a @try with no catches), just branch
+ // through the cleanup to the rethrow block.
+ if (!isTry || !cast<ObjCAtTryStmt>(S).getNumCatchStmts()) {
+ // Tell the cleanup not to re-pop the exit.
CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext),
- CallTryExitPtr);
+ CallTryExitVar);
+
CGF.EmitBranchThroughCleanup(FinallyRethrow);
- } else if (cast<ObjCAtTryStmt>(S).getNumCatchStmts()) {
+
+ // Otherwise, we have to match against the caught exceptions.
+ } else {
+ // Push the exception to rethrow onto the EH value stack for the
+ // benefit of any @throws in the handlers.
+ CGF.ObjCEHValueStack.push_back(Caught);
+
const ObjCAtTryStmt* AtTryStmt = cast<ObjCAtTryStmt>(&S);
// Enter a new exception try block (in case a @catch block throws
- // an exception).
- CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData);
+ // an exception). Now CallTryExitVar (currently true) is back in
+ // synch with reality.
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData)
+ ->setDoesNotThrow();
- llvm::Value *SetJmpResult = CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(),
- JmpBufPtr, "result");
- llvm::Value *Threw = CGF.Builder.CreateIsNotNull(SetJmpResult, "threw");
+ llvm::CallInst *SetJmpResult =
+ CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer,
+ "setjmp.result");
+ SetJmpResult->setDoesNotThrow();
+
+ llvm::Value *Threw =
+ CGF.Builder.CreateIsNotNull(SetJmpResult, "did_catch_exception");
llvm::BasicBlock *CatchBlock = CGF.createBasicBlock("catch");
- llvm::BasicBlock *CatchHandler = CGF.createBasicBlock("catch.handler");
+ llvm::BasicBlock *CatchHandler = CGF.createBasicBlock("catch_for_catch");
CGF.Builder.CreateCondBr(Threw, CatchHandler, CatchBlock);
CGF.EmitBlock(CatchBlock);
@@ -2680,7 +2807,6 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
bool AllMatched = false;
for (unsigned I = 0, N = AtTryStmt->getNumCatchStmts(); I != N; ++I) {
const ObjCAtCatchStmt *CatchStmt = AtTryStmt->getCatchStmt(I);
- llvm::BasicBlock *NextCatchBlock = CGF.createBasicBlock("catch");
const VarDecl *CatchParam = CatchStmt->getCatchParamDecl();
const ObjCObjectPointerType *OPT = 0;
@@ -2691,47 +2817,67 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
} else {
OPT = CatchParam->getType()->getAs<ObjCObjectPointerType>();
- // catch(id e) always matches.
+ // catch(id e) always matches under this ABI, since only
+ // ObjC exceptions end up here in the first place.
// FIXME: For the time being we also match id<X>; this should
// be rejected by Sema instead.
if (OPT && (OPT->isObjCIdType() || OPT->isObjCQualifiedIdType()))
AllMatched = true;
}
+ // If this is a catch-all, we don't need to test anything.
if (AllMatched) {
+ CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF);
+
if (CatchParam) {
CGF.EmitLocalBlockVarDecl(*CatchParam);
assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
+
+ // These types work out because ConvertType(id) == i8*.
CGF.Builder.CreateStore(Caught, CGF.GetAddrOfLocalVar(CatchParam));
}
CGF.EmitStmt(CatchStmt->getCatchBody());
+
+ // The scope of the catch variable ends right here.
+ CatchVarCleanups.ForceCleanup();
+
CGF.EmitBranchThroughCleanup(FinallyEnd);
break;
}
assert(OPT && "Unexpected non-object pointer type in @catch");
const ObjCObjectType *ObjTy = OPT->getObjectType();
+
+ // FIXME: @catch (Class c) ?
ObjCInterfaceDecl *IDecl = ObjTy->getInterface();
assert(IDecl && "Catch parameter must have Objective-C type!");
// Check if the @catch block matches the exception object.
llvm::Value *Class = EmitClassRef(CGF.Builder, IDecl);
- llvm::Value *Match =
+ llvm::CallInst *Match =
CGF.Builder.CreateCall2(ObjCTypes.getExceptionMatchFn(),
Class, Caught, "match");
+ Match->setDoesNotThrow();
- llvm::BasicBlock *MatchedBlock = CGF.createBasicBlock("matched");
+ llvm::BasicBlock *MatchedBlock = CGF.createBasicBlock("match");
+ llvm::BasicBlock *NextCatchBlock = CGF.createBasicBlock("catch.next");
CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(Match, "matched"),
MatchedBlock, NextCatchBlock);
// Emit the @catch block.
CGF.EmitBlock(MatchedBlock);
+
+ // Collect any cleanups for the catch variable. The scope lasts until
+ // the end of the catch body.
+ CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF);
+
CGF.EmitLocalBlockVarDecl(*CatchParam);
assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
+ // Initialize the catch variable.
llvm::Value *Tmp =
CGF.Builder.CreateBitCast(Caught,
CGF.ConvertType(CatchParam->getType()),
@@ -2739,11 +2885,17 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
CGF.Builder.CreateStore(Tmp, CGF.GetAddrOfLocalVar(CatchParam));
CGF.EmitStmt(CatchStmt->getCatchBody());
+
+ // We're done with the catch variable.
+ CatchVarCleanups.ForceCleanup();
+
CGF.EmitBranchThroughCleanup(FinallyEnd);
CGF.EmitBlock(NextCatchBlock);
}
+ CGF.ObjCEHValueStack.pop_back();
+
if (!AllMatched) {
// None of the handlers caught the exception, so store it to be
// rethrown at the end of the @finally block.
@@ -2753,59 +2905,34 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// Emit the exception handler for the @catch blocks.
CGF.EmitBlock(CatchHandler);
- CGF.Builder.CreateStore(
- CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
- ExceptionData),
- RethrowPtr);
- CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext),
- CallTryExitPtr);
- CGF.EmitBranchThroughCleanup(FinallyRethrow);
- } else {
+
+ // Rethrow the new exception, not the old one.
+ Caught = CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData);
+ Caught->setDoesNotThrow();
CGF.Builder.CreateStore(Caught, RethrowPtr);
+
+ // Don't pop the catch handler; the throw already did.
CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext),
- CallTryExitPtr);
+ CallTryExitVar);
CGF.EmitBranchThroughCleanup(FinallyRethrow);
}
- // Pop the exception-handling stack entry. It is important to do
- // this now, because the code in the @finally block is not in this
- // context.
- CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
-
- CGF.ObjCEHValueStack.pop_back();
-
- // Emit the @finally block.
- CGF.EmitBlock(FinallyBlock);
- llvm::Value* CallTryExit = CGF.Builder.CreateLoad(CallTryExitPtr, "tmp");
+ // Pop the cleanup.
+ CGF.PopCleanupBlock();
+ CGF.EmitBlock(FinallyEnd.Block);
- CGF.Builder.CreateCondBr(CallTryExit, FinallyExit, FinallyNoExit);
-
- CGF.EmitBlock(FinallyExit);
- CGF.Builder.CreateCall(ObjCTypes.getExceptionTryExitFn(), ExceptionData);
-
- CGF.EmitBlock(FinallyNoExit);
- if (isTry) {
- if (const ObjCAtFinallyStmt* FinallyStmt =
- cast<ObjCAtTryStmt>(S).getFinallyStmt())
- CGF.EmitStmt(FinallyStmt->getFinallyBody());
- } else {
- // Emit objc_sync_exit(expr); as finally's sole statement for
- // @synchronized.
- CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg);
+ // Emit the rethrow block.
+ CGF.Builder.ClearInsertionPoint();
+ CGF.EmitBlock(FinallyRethrow.Block, true);
+ if (CGF.HaveInsertPoint()) {
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(),
+ CGF.Builder.CreateLoad(RethrowPtr))
+ ->setDoesNotThrow();
+ CGF.Builder.CreateUnreachable();
}
- // Emit the switch block
- if (Info.SwitchBlock)
- CGF.EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- CGF.EmitBlock(Info.EndBlock);
-
- CGF.EmitBlock(FinallyRethrow);
- CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(),
- CGF.Builder.CreateLoad(RethrowPtr));
- CGF.Builder.CreateUnreachable();
-
- CGF.EmitBlock(FinallyEnd);
+ CGF.Builder.SetInsertPoint(FinallyEnd.Block);
}
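
[Annotation] The CallTryExitVar bookkeeping above rests on one invariant worth spelling out: objc_exception_throw pops the catch buffer itself before longjmping, so any edge that reaches the cleanup through a handler must suppress the cleanup's own pop. A compact runnable model (the flag, the depth counter, and all names are illustrative stand-ins, not the runtime's API):

    #include <csetjmp>
    #include <cstdio>

    static std::jmp_buf buf;
    static int ehDepth = 0;          // models the catch-buffer stack depth
    static bool callTryExit = true;  // models the _call_try_exit flag

    int main() {
      ++ehDepth;                     // objc_exception_try_enter
      if (setjmp(buf) == 0) {
        // Protected block: simulate a throw. The throw pops the top
        // catch buffer itself before longjmping to the handler...
        --ehDepth;
        std::longjmp(buf, 1);
      } else {
        // ...so the handler edge tells the cleanup not to pop again.
        callTryExit = false;
      }
      if (callTryExit)               // the conditional finally cleanup
        --ehDepth;                   // objc_exception_try_exit
      std::printf("depth at exit: %d\n", ehDepth);  // prints 0
      return 0;
    }
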
void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
@@ -2822,7 +2949,8 @@ void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
ExceptionAsObject = CGF.ObjCEHValueStack.back();
}
- CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject);
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject)
+ ->setDoesNotReturn();
CGF.Builder.CreateUnreachable();
// Clear the insertion point to indicate we are in unreachable code.
@@ -2929,15 +3057,11 @@ void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
void CGObjCMac::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
llvm::Value *DestPtr,
llvm::Value *SrcPtr,
- QualType Ty) {
- // Get size info for this aggregate.
- std::pair<uint64_t, unsigned> TypeInfo = CGM.getContext().getTypeInfo(Ty);
- unsigned long size = TypeInfo.first/8;
+ llvm::Value *size) {
SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
- llvm::Value *N = llvm::ConstantInt::get(ObjCTypes.LongTy, size);
CGF.Builder.CreateCall3(ObjCTypes.GcMemmoveCollectableFn(),
- DestPtr, SrcPtr, N);
+ DestPtr, SrcPtr, size);
return;
}
@@ -2997,12 +3121,14 @@ void CGObjCCommonMac::EmitImageInfo() {
// We never allow @synthesize of a superclass property.
flags |= eImageInfo_CorrectedSynthesize;
+ const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
+
// Emitted as int[2];
llvm::Constant *values[2] = {
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), version),
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags)
+ llvm::ConstantInt::get(Int32Ty, version),
+ llvm::ConstantInt::get(Int32Ty, flags)
};
- llvm::ArrayType *AT = llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext), 2);
+ llvm::ArrayType *AT = llvm::ArrayType::get(Int32Ty, 2);
const char *Section;
if (ObjCABI == 1)
@@ -3102,7 +3228,8 @@ llvm::Value *CGObjCMac::EmitClassRef(CGBuilderTy &Builder,
return Builder.CreateLoad(Entry, "tmp");
}
-llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel) {
+llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lvalue) {
llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
if (!Entry) {
@@ -3115,6 +3242,8 @@ llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel) {
4, true);
}
+ if (lvalue)
+ return Entry;
return Builder.CreateLoad(Entry, "tmp");
}
@@ -3632,8 +3761,14 @@ void CGObjCMac::FinishModule() {
OS << "\t.objc_class_name_" << (*I)->getName() << "=0\n"
<< "\t.globl .objc_class_name_" << (*I)->getName() << "\n";
for (llvm::SetVector<IdentifierInfo*>::iterator I = LazySymbols.begin(),
- e = LazySymbols.end(); I != e; ++I)
+ e = LazySymbols.end(); I != e; ++I) {
OS << "\t.lazy_reference .objc_class_name_" << (*I)->getName() << "\n";
+ }
+
+ for (size_t i = 0; i < DefinedCategoryNames.size(); ++i) {
+ OS << "\t.objc_category_name_" << DefinedCategoryNames[i] << "=0\n"
+ << "\t.globl .objc_category_name_" << DefinedCategoryNames[i] << "\n";
+ }
CGM.getModule().setModuleInlineAsm(OS.str());
}
@@ -3949,8 +4084,9 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
llvm::Type::getInt8PtrTy(VMContext), 4);
ExceptionDataTy =
- llvm::StructType::get(VMContext, llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext),
- SetJmpBufferSize),
+ llvm::StructType::get(VMContext,
+ llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext),
+ SetJmpBufferSize),
StackPtrTy, NULL);
CGM.getModule().addTypeName("struct._objc_exception_data",
ExceptionDataTy);
@@ -5147,7 +5283,7 @@ CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend(
FunctionType::ExtInfo());
llvm::Constant *Fn = 0;
std::string Name("\01l_");
- if (CGM.ReturnTypeUsesSret(FnInfo)) {
+ if (CGM.ReturnTypeUsesSRet(FnInfo)) {
#if 0
// unlike what is documented. gcc never generates this API!!
if (Receiver->getType() == ObjCTypes.ObjectPtrTy) {
@@ -5164,14 +5300,9 @@ CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend(
Fn = ObjCTypes.getMessageSendStretFixupFn();
Name += "objc_msgSend_stret_fixup";
}
- } else if (!IsSuper && ResultType->isFloatingType()) {
- if (ResultType->isSpecificBuiltinType(BuiltinType::LongDouble)) {
- Fn = ObjCTypes.getMessageSendFpretFixupFn();
- Name += "objc_msgSend_fpret_fixup";
- } else {
- Fn = ObjCTypes.getMessageSendFixupFn();
- Name += "objc_msgSend_fixup";
- }
+ } else if (!IsSuper && CGM.ReturnTypeUsesFPRet(ResultType)) {
+ Fn = ObjCTypes.getMessageSendFpretFixupFn();
+ Name += "objc_msgSend_fpret_fixup";
} else {
#if 0
// unlike what is documented. gcc never generates this API!!
@@ -5403,7 +5534,7 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
}
llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CGBuilderTy &Builder,
- Selector Sel) {
+ Selector Sel, bool lval) {
llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
if (!Entry) {
@@ -5418,6 +5549,8 @@ llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CGBuilderTy &Builder,
CGM.AddUsedGlobal(Entry);
}
+ if (lval)
+ return Entry;
return Builder.CreateLoad(Entry, "tmp");
}
/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
@@ -5467,15 +5600,11 @@ void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable(
CodeGen::CodeGenFunction &CGF,
llvm::Value *DestPtr,
llvm::Value *SrcPtr,
- QualType Ty) {
- // Get size info for this aggregate.
- std::pair<uint64_t, unsigned> TypeInfo = CGM.getContext().getTypeInfo(Ty);
- unsigned long size = TypeInfo.first/8;
+ llvm::Value *Size) {
SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
- llvm::Value *N = llvm::ConstantInt::get(ObjCTypes.LongTy, size);
CGF.Builder.CreateCall3(ObjCTypes.GcMemmoveCollectableFn(),
- DestPtr, SrcPtr, N);
+ DestPtr, SrcPtr, Size);
return;
}
@@ -5535,75 +5664,92 @@ void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
}
void
-CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
- const Stmt &S) {
- bool isTry = isa<ObjCAtTryStmt>(S);
- llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
- llvm::BasicBlock *PrevLandingPad = CGF.getInvokeDest();
- llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
- llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally");
- llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw");
- llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end");
+CGObjCNonFragileABIMac::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) {
+ // Evaluate the lock operand. This should dominate the cleanup.
+ llvm::Value *SyncArg = CGF.EmitScalarExpr(S.getSynchExpr());
- // For @synchronized, call objc_sync_enter(sync.expr). The
- // evaluation of the expression must occur before we enter the
- // @synchronized. We can safely avoid a temp here because jumps into
- // @synchronized are illegal & this will dominate uses.
- llvm::Value *SyncArg = 0;
- if (!isTry) {
- SyncArg =
- CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
- SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
- CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg);
+ // Acquire the lock.
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg)
+ ->setDoesNotThrow();
+
+ // Register an all-paths cleanup to release the lock.
+ {
+ CodeGenFunction::CleanupBlock ReleaseScope(CGF, NormalAndEHCleanup);
+
+ CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg)
+ ->setDoesNotThrow();
}
- // Push an EH context entry, used for handling rethrows and jumps
- // through finally.
- CGF.PushCleanupBlock(FinallyBlock);
+ // Emit the body of the statement.
+ CGF.EmitStmt(S.getSynchBody());
- CGF.setInvokeDest(TryHandler);
+ // Pop the lock-release cleanup.
+ CGF.PopCleanupBlock();
+}
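
[Annotation] A sketch of what this @synchronized lowering guarantees, in stand-alone C++: a recursive mutex and an RAII guard stand in for objc_sync_enter/objc_sync_exit and the NormalAndEHCleanup, so the lock is released on the normal path and during unwinding alike. Names are hypothetical.

    #include <iostream>
    #include <mutex>
    #include <stdexcept>

    static std::recursive_mutex gSyncLock;  // stand-in for the object's lock

    struct SyncScope {                      // plays the NormalAndEHCleanup
      SyncScope()  { gSyncLock.lock(); }    // objc_sync_enter(SyncArg)
      ~SyncScope() { gSyncLock.unlock(); }  // objc_sync_exit(SyncArg)
    };

    static void body(bool fail) {
      SyncScope scope;                      // active on the normal path and
                                            // on the unwind path alike
      if (fail)
        throw std::runtime_error("thrown inside the synchronized body");
      std::cout << "body ran under the lock\n";
    }

    int main() {
      body(false);
      try {
        body(true);
      } catch (const std::exception &e) {
        std::cout << "lock released before: " << e.what() << "\n";
      }
      return 0;
    }
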
- CGF.EmitBlock(TryBlock);
- CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
- : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
- CGF.EmitBranchThroughCleanup(FinallyEnd);
+namespace {
+ struct CatchHandler {
+ const VarDecl *Variable;
+ const Stmt *Body;
+ llvm::BasicBlock *Block;
+ llvm::Value *TypeInfo;
+ };
- // Emit the exception handler.
+ struct CallObjCEndCatch : EHScopeStack::LazyCleanup {
+ CallObjCEndCatch(bool MightThrow, llvm::Value *Fn) :
+ MightThrow(MightThrow), Fn(Fn) {}
+ bool MightThrow;
+ llvm::Value *Fn;
- CGF.EmitBlock(TryHandler);
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ if (!MightThrow) {
+ CGF.Builder.CreateCall(Fn)->setDoesNotThrow();
+ return;
+ }
+
+ CGF.EmitCallOrInvoke(Fn, 0, 0);
+ }
+ };
+}
- llvm::Value *llvm_eh_exception =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *llvm_eh_selector =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
- llvm::Value *llvm_eh_typeid_for =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
- llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
- llvm::Value *RethrowPtr = CGF.CreateTempAlloca(Exc->getType(), "_rethrow");
-
- llvm::SmallVector<llvm::Value*, 8> SelectorArgs;
- SelectorArgs.push_back(Exc);
- SelectorArgs.push_back(ObjCTypes.getEHPersonalityPtr());
-
- // Construct the lists of (type, catch body) to handle.
- llvm::SmallVector<std::pair<const VarDecl*, const Stmt*>, 8> Handlers;
- bool HasCatchAll = false;
- if (isTry) {
- const ObjCAtTryStmt &AtTry = cast<ObjCAtTryStmt>(S);
- for (unsigned I = 0, N = AtTry.getNumCatchStmts(); I != N; ++I) {
- const ObjCAtCatchStmt *CatchStmt = AtTry.getCatchStmt(I);
+void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) {
+ // Jump destination for falling out of catch bodies.
+ CodeGenFunction::JumpDest Cont;
+ if (S.getNumCatchStmts())
+ Cont = CGF.getJumpDestInCurrentScope("eh.cont");
+
+ CodeGenFunction::FinallyInfo FinallyInfo;
+ if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
+ FinallyInfo = CGF.EnterFinallyBlock(Finally->getFinallyBody(),
+ ObjCTypes.getObjCBeginCatchFn(),
+ ObjCTypes.getObjCEndCatchFn(),
+ ObjCTypes.getExceptionRethrowFn());
+
+ llvm::SmallVector<CatchHandler, 8> Handlers;
+
+ // Enter the catch, if there is one.
+ if (S.getNumCatchStmts()) {
+ for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
- Handlers.push_back(std::make_pair(CatchDecl, CatchStmt->getCatchBody()));
- // catch(...) always matches.
+ Handlers.push_back(CatchHandler());
+ CatchHandler &Handler = Handlers.back();
+ Handler.Variable = CatchDecl;
+ Handler.Body = CatchStmt->getCatchBody();
+ Handler.Block = CGF.createBasicBlock("catch");
+
+ // @catch(...) always matches.
if (!CatchDecl) {
- // Use i8* null here to signal this is a catch all, not a cleanup.
- llvm::Value *Null = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
- SelectorArgs.push_back(Null);
- HasCatchAll = true;
+ Handler.TypeInfo = 0; // catch-all
+ // Don't consider any other catches.
break;
}
+ // There's a particular fixed type info for 'id'.
if (CatchDecl->getType()->isObjCIdType() ||
CatchDecl->getType()->isObjCQualifiedIdType()) {
llvm::Value *IDEHType =
@@ -5614,7 +5760,7 @@ CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
false,
llvm::GlobalValue::ExternalLinkage,
0, "OBJC_EHTYPE_id");
- SelectorArgs.push_back(IDEHType);
+ Handler.TypeInfo = IDEHType;
} else {
// All other types should be Objective-C interface pointer types.
const ObjCObjectPointerType *PT =
@@ -5622,207 +5768,101 @@ CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
assert(PT && "Invalid @catch type.");
const ObjCInterfaceType *IT = PT->getInterfaceType();
assert(IT && "Invalid @catch type.");
- llvm::Value *EHType = GetInterfaceEHType(IT->getDecl(), false);
- SelectorArgs.push_back(EHType);
+ Handler.TypeInfo = GetInterfaceEHType(IT->getDecl(), false);
}
}
- }
- // We use a cleanup unless there was already a catch all.
- if (!HasCatchAll) {
- // Even though this is a cleanup, treat it as a catch all to avoid the C++
- // personality behavior of terminating the process if only cleanups are
- // found in the exception handling stack.
- SelectorArgs.push_back(llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy));
- Handlers.push_back(std::make_pair((const ParmVarDecl*) 0, (const Stmt*) 0));
+ EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
+ Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
}
+
+ // Emit the try body.
+ CGF.EmitStmt(S.getTryBody());
- llvm::Value *Selector =
- CGF.Builder.CreateCall(llvm_eh_selector,
- SelectorArgs.begin(), SelectorArgs.end(),
- "selector");
- for (unsigned i = 0, e = Handlers.size(); i != e; ++i) {
- const VarDecl *CatchParam = Handlers[i].first;
- const Stmt *CatchBody = Handlers[i].second;
-
- llvm::BasicBlock *Next = 0;
-
- // The last handler always matches.
- if (i + 1 != e) {
- assert(CatchParam && "Only last handler can be a catch all.");
+ // Leave the try.
+ if (S.getNumCatchStmts())
+ CGF.EHStack.popCatch();
- llvm::BasicBlock *Match = CGF.createBasicBlock("match");
- Next = CGF.createBasicBlock("catch.next");
- llvm::Value *Id =
- CGF.Builder.CreateCall(llvm_eh_typeid_for,
- CGF.Builder.CreateBitCast(SelectorArgs[i+2],
- ObjCTypes.Int8PtrTy));
- CGF.Builder.CreateCondBr(CGF.Builder.CreateICmpEQ(Selector, Id),
- Match, Next);
+ // Remember where we were.
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
- CGF.EmitBlock(Match);
- }
+ // Emit the handlers.
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
+ CatchHandler &Handler = Handlers[I];
- if (CatchBody) {
- llvm::BasicBlock *MatchEnd = CGF.createBasicBlock("match.end");
-
- // Cleanups must call objc_end_catch.
- CGF.PushCleanupBlock(MatchEnd);
-
- llvm::Value *ExcObject =
- CGF.Builder.CreateCall(ObjCTypes.getObjCBeginCatchFn(), Exc);
-
- // Bind the catch parameter if it exists.
- if (CatchParam) {
- ExcObject =
- CGF.Builder.CreateBitCast(ExcObject,
- CGF.ConvertType(CatchParam->getType()));
- // CatchParam is a ParmVarDecl because of the grammar
- // construction used to handle this, but for codegen purposes
- // we treat this as a local decl.
- CGF.EmitLocalBlockVarDecl(*CatchParam);
- CGF.Builder.CreateStore(ExcObject, CGF.GetAddrOfLocalVar(CatchParam));
- }
+ CGF.EmitBlock(Handler.Block);
+ llvm::Value *RawExn = CGF.Builder.CreateLoad(CGF.getExceptionSlot());
- // Exceptions inside the catch block must be rethrown. We set a special
- // purpose invoke destination for this which just collects the thrown
- // exception and overwrites the object in RethrowPtr, branches through the
- // match.end to make sure we call objc_end_catch, before branching to the
- // rethrow handler.
- llvm::BasicBlock *MatchHandler = CGF.createBasicBlock("match.handler");
- CGF.setInvokeDest(MatchHandler);
- CGF.ObjCEHValueStack.push_back(ExcObject);
- CGF.EmitStmt(CatchBody);
- CGF.ObjCEHValueStack.pop_back();
- CGF.setInvokeDest(0);
+ // Enter the catch.
+ llvm::CallInst *Exn =
+ CGF.Builder.CreateCall(ObjCTypes.getObjCBeginCatchFn(), RawExn,
+ "exn.adjusted");
+ Exn->setDoesNotThrow();
- CGF.EmitBranchThroughCleanup(FinallyEnd);
+ // Add a cleanup to leave the catch.
+ bool EndCatchMightThrow = (Handler.Variable == 0);
+ CGF.EHStack.pushLazyCleanup<CallObjCEndCatch>(NormalAndEHCleanup,
+ EndCatchMightThrow,
+ ObjCTypes.getObjCEndCatchFn());
- // Don't emit the extra match handler if there we no unprotected calls in
- // the catch block.
- if (MatchHandler->use_empty()) {
- delete MatchHandler;
- } else {
- CGF.EmitBlock(MatchHandler);
- llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
- // We are required to emit this call to satisfy LLVM, even
- // though we don't use the result.
- CGF.Builder.CreateCall3(llvm_eh_selector,
- Exc, ObjCTypes.getEHPersonalityPtr(),
- llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), 0),
- "unused_eh_selector");
- CGF.Builder.CreateStore(Exc, RethrowPtr);
- CGF.EmitBranchThroughCleanup(FinallyRethrow);
- }
+ // Bind the catch parameter if it exists.
+ if (const VarDecl *CatchParam = Handler.Variable) {
+ const llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
+ llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);
- CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
-
- CGF.EmitBlock(MatchEnd);
-
- // Unfortunately, we also have to generate another EH frame here
- // in case this throws.
- llvm::BasicBlock *MatchEndHandler =
- CGF.createBasicBlock("match.end.handler");
- llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
- CGF.Builder.CreateInvoke(ObjCTypes.getObjCEndCatchFn(),
- Cont, MatchEndHandler);
-
- CGF.EmitBlock(Cont);
- if (Info.SwitchBlock)
- CGF.EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- CGF.EmitBlock(Info.EndBlock);
-
- CGF.EmitBlock(MatchEndHandler);
- llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
- // We are required to emit this call to satisfy LLVM, even
- // though we don't use the result.
- CGF.Builder.CreateCall3(llvm_eh_selector,
- Exc, ObjCTypes.getEHPersonalityPtr(),
- llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), 0),
- "unused_eh_selector");
- CGF.Builder.CreateStore(Exc, RethrowPtr);
- CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ CGF.EmitLocalBlockVarDecl(*CatchParam);
+ CGF.Builder.CreateStore(CastExn, CGF.GetAddrOfLocalVar(CatchParam));
+ }
- if (Next)
- CGF.EmitBlock(Next);
- } else {
- assert(!Next && "catchup should be last handler.");
+ CGF.ObjCEHValueStack.push_back(Exn);
+ CGF.EmitStmt(Handler.Body);
+ CGF.ObjCEHValueStack.pop_back();
- CGF.Builder.CreateStore(Exc, RethrowPtr);
- CGF.EmitBranchThroughCleanup(FinallyRethrow);
- }
- }
+ // Leave the earlier cleanup.
+ CGF.PopCleanupBlock();
- // Pop the cleanup entry, the @finally is outside this cleanup
- // scope.
- CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
- CGF.setInvokeDest(PrevLandingPad);
+ CGF.EmitBranchThroughCleanup(Cont);
+ }
- CGF.EmitBlock(FinallyBlock);
+ // Go back to the try-statement fallthrough.
+ CGF.Builder.restoreIP(SavedIP);
- if (isTry) {
- if (const ObjCAtFinallyStmt* FinallyStmt =
- cast<ObjCAtTryStmt>(S).getFinallyStmt())
- CGF.EmitStmt(FinallyStmt->getFinallyBody());
- } else {
- // Emit 'objc_sync_exit(expr)' as finally's sole statement for
- // @synchronized.
- CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg);
- }
-
- if (Info.SwitchBlock)
- CGF.EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- CGF.EmitBlock(Info.EndBlock);
-
- // Branch around the rethrow code.
- CGF.EmitBranch(FinallyEnd);
-
- // Generate the rethrow code, taking care to use an invoke if we are in a
- // nested exception scope.
- CGF.EmitBlock(FinallyRethrow);
- if (PrevLandingPad) {
- llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
- CGF.Builder.CreateInvoke(ObjCTypes.getUnwindResumeOrRethrowFn(),
- Cont, PrevLandingPad,
- CGF.Builder.CreateLoad(RethrowPtr));
- CGF.EmitBlock(Cont);
- } else {
- CGF.Builder.CreateCall(ObjCTypes.getUnwindResumeOrRethrowFn(),
- CGF.Builder.CreateLoad(RethrowPtr));
- }
- CGF.Builder.CreateUnreachable();
+ // Pop out of the normal cleanup on the finally.
+ if (S.getFinallyStmt())
+ CGF.ExitFinallyBlock(FinallyInfo);
- CGF.EmitBlock(FinallyEnd);
+ if (Cont.Block)
+ CGF.EmitBlock(Cont.Block);
}
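
[Annotation] A toy model of the handler dispatch this sets up: each @catch contributes a (type-info, block) pair, a null type info is the catch-all, and collection stops at the first catch-all. Pointer identity stands in for EH type-info matching, and all names are hypothetical:

    #include <cstdio>
    #include <vector>

    struct CatchHandler {            // mirrors the struct in the code above
      const char *typeInfo;          // stand-in for an EH type; null = catch-all
      void (*body)(void *exn);
    };

    static const CatchHandler *selectHandler(const std::vector<CatchHandler> &hs,
                                             const char *exnType) {
      for (const CatchHandler &h : hs) {
        if (!h.typeInfo)             // @catch(...) always matches, and no
          return &h;                 // later handler is even considered
        if (h.typeInfo == exnType)   // pointer identity models the match
          return &h;
      }
      return nullptr;                // no match: unwind to an outer scope
    }

    int main() {
      const char *kNSError = "NSError";   // hypothetical EH type tag
      std::vector<CatchHandler> handlers = {
        { kNSError, [](void *) { std::puts("caught NSError"); } },
        { nullptr,  [](void *) { std::puts("catch-all");      } },
      };
      if (const CatchHandler *h = selectHandler(handlers, kNSError))
        h->body(nullptr);
      return 0;
    }
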
/// EmitThrowStmt - Generate code for a throw statement.
void CGObjCNonFragileABIMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtThrowStmt &S) {
llvm::Value *Exception;
+ llvm::Constant *FunctionThrowOrRethrow;
if (const Expr *ThrowExpr = S.getThrowExpr()) {
Exception = CGF.EmitScalarExpr(ThrowExpr);
+ FunctionThrowOrRethrow = ObjCTypes.getExceptionThrowFn();
} else {
assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
"Unexpected rethrow outside @catch block.");
Exception = CGF.ObjCEHValueStack.back();
+ FunctionThrowOrRethrow = ObjCTypes.getExceptionRethrowFn();
}
llvm::Value *ExceptionAsObject =
CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy, "tmp");
llvm::BasicBlock *InvokeDest = CGF.getInvokeDest();
if (InvokeDest) {
- llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
- CGF.Builder.CreateInvoke(ObjCTypes.getExceptionThrowFn(),
- Cont, InvokeDest,
+ CGF.Builder.CreateInvoke(FunctionThrowOrRethrow,
+ CGF.getUnreachableBlock(), InvokeDest,
&ExceptionAsObject, &ExceptionAsObject + 1);
- CGF.EmitBlock(Cont);
- } else
- CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject);
- CGF.Builder.CreateUnreachable();
+ } else {
+ CGF.Builder.CreateCall(FunctionThrowOrRethrow, ExceptionAsObject)
+ ->setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+ }
// Clear the insertion point to indicate we are in unreachable code.
CGF.Builder.ClearInsertionPoint();
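
[Annotation] The throw-versus-rethrow split above is small but easy to get backwards; here is a runnable model with ObjCEHValueStack reduced to a vector and the runtime calls reduced to prints (all names illustrative):

    #include <cassert>
    #include <cstdio>
    #include <vector>

    static std::vector<void *> objcEHValueStack;  // models ObjCEHValueStack

    static void emitThrow(void *thrownExpr) {     // null models a bare @throw;
      void *exception;
      const char *fn;
      if (thrownExpr) {
        exception = thrownExpr;
        fn = "objc_exception_throw";
      } else {
        assert(!objcEHValueStack.empty() && "rethrow outside @catch block");
        exception = objcEHValueStack.back();
        fn = "objc_exception_rethrow";
      }
      std::printf("call %s(%p)   ; noreturn\n", fn, exception);
    }

    int main() {
      int exn = 1;
      emitThrow(&exn);                   // @throw expr;
      objcEHValueStack.push_back(&exn);  // entering a @catch body
      emitThrow(nullptr);                // bare @throw; inside the handler
      objcEHValueStack.pop_back();
      return 0;
    }
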
@@ -5863,7 +5903,8 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
llvm::GlobalValue::ExternalLinkage,
0, VTableName);
- llvm::Value *VTableIdx = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 2);
+ llvm::Value *VTableIdx =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 2);
std::vector<llvm::Constant*> Values(3);
Values[0] = llvm::ConstantExpr::getGetElementPtr(VTableGV, &VTableIdx, 1);
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
index 8de7f10..eb79f09 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
@@ -97,7 +97,7 @@ public:
/// return value should have the LLVM type for pointer-to
/// ASTContext::getObjCSelType().
virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
- Selector Sel) = 0;
+ Selector Sel, bool lval=false) = 0;
/// Get a typed selector.
virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
@@ -181,8 +181,10 @@ public:
/// compiler when a mutation is detected during foreach iteration.
virtual llvm::Constant *EnumerationMutationFunction() = 0;
- virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
- const Stmt &S) = 0;
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) = 0;
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) = 0;
virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtThrowStmt &S) = 0;
virtual llvm::Value *EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
@@ -208,7 +210,7 @@ public:
virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
llvm::Value *DestPtr,
llvm::Value *SrcPtr,
- QualType Ty) = 0;
+ llvm::Value *Size) = 0;
};
/// Creates an instance of an Objective-C runtime class.
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
index aec1c45..1cca977 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
@@ -271,7 +271,7 @@ static bool ShouldUseExternalRTTIDescriptor(ASTContext &Context,
// Get the key function.
const CXXMethodDecl *KeyFunction = RD->getASTContext().getKeyFunction(RD);
- if (KeyFunction && !KeyFunction->getBody()) {
+ if (KeyFunction && !KeyFunction->hasBody()) {
// The class has a key function, but it is not defined in this translation
// unit, so we should use the external descriptor for it.
return true;
@@ -728,15 +728,19 @@ void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
void RTTIBuilder::BuildPointerTypeInfo(const PointerType *Ty) {
QualType PointeeTy = Ty->getPointeeType();
+ Qualifiers Quals;
+ QualType UnqualifiedPointeeTy =
+ CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals);
+
// Itanium C++ ABI 2.9.5p7:
// __flags is a flag word describing the cv-qualification and other
// attributes of the type pointed to
- unsigned Flags = ComputeQualifierFlags(PointeeTy.getQualifiers());
+ unsigned Flags = ComputeQualifierFlags(Quals);
// Itanium C++ ABI 2.9.5p7:
// When the abi::__pbase_type_info is for a direct or indirect pointer to an
// incomplete class type, the incomplete target type flag is set.
- if (ContainsIncompleteClassType(PointeeTy))
+ if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
Flags |= PTI_Incomplete;
const llvm::Type *UnsignedIntLTy =
@@ -747,7 +751,7 @@ void RTTIBuilder::BuildPointerTypeInfo(const PointerType *Ty) {
// __pointee is a pointer to the std::type_info derivation for the
// unqualified type being pointed to.
llvm::Constant *PointeeTypeInfo =
- RTTIBuilder(CGM).BuildTypeInfo(PointeeTy.getUnqualifiedType());
+ RTTIBuilder(CGM).BuildTypeInfo(UnqualifiedPointeeTy);
Fields.push_back(PointeeTypeInfo);
}
@@ -756,17 +760,21 @@ void RTTIBuilder::BuildPointerTypeInfo(const PointerType *Ty) {
void RTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
QualType PointeeTy = Ty->getPointeeType();
+ Qualifiers Quals;
+ QualType UnqualifiedPointeeTy =
+ CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals);
+
// Itanium C++ ABI 2.9.5p7:
// __flags is a flag word describing the cv-qualification and other
// attributes of the type pointed to.
- unsigned Flags = ComputeQualifierFlags(PointeeTy.getQualifiers());
+ unsigned Flags = ComputeQualifierFlags(Quals);
const RecordType *ClassType = cast<RecordType>(Ty->getClass());
// Itanium C++ ABI 2.9.5p7:
// When the abi::__pbase_type_info is for a direct or indirect pointer to an
// incomplete class type, the incomplete target type flag is set.
- if (ContainsIncompleteClassType(PointeeTy))
+ if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
Flags |= PTI_Incomplete;
if (IsIncompleteClassType(ClassType))
@@ -780,7 +788,7 @@ void RTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
// __pointee is a pointer to the std::type_info derivation for the
// unqualified type being pointed to.
llvm::Constant *PointeeTypeInfo =
- RTTIBuilder(CGM).BuildTypeInfo(PointeeTy.getUnqualifiedType());
+ RTTIBuilder(CGM).BuildTypeInfo(UnqualifiedPointeeTy);
Fields.push_back(PointeeTypeInfo);
// Itanium C++ ABI 2.9.5p9:
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
index efde380..b72725e 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
@@ -79,11 +79,8 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
// Expression emitters don't handle unreachable blocks yet, so look for one
// explicitly here. This handles the common case of a call to a noreturn
// function.
- // We can't erase blocks with an associated cleanup size here since the
- // memory might be reused, leaving the old cleanup info pointing at a new
- // block.
if (llvm::BasicBlock *CurBB = Builder.GetInsertBlock()) {
- if (CurBB->empty() && CurBB->use_empty() && !BlockScopes.count(CurBB)) {
+ if (CurBB->empty() && CurBB->use_empty()) {
CurBB->eraseFromParent();
Builder.ClearInsertionPoint();
}
@@ -159,7 +156,7 @@ RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
}
// Keep track of the current cleanup stack depth.
- CleanupScope Scope(*this);
+ RunCleanupsScope Scope(*this);
for (CompoundStmt::const_body_iterator I = S.body_begin(),
E = S.body_end()-GetLast; I != E; ++I)
@@ -198,7 +195,7 @@ void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
// If there is a cleanup stack, then it isn't worth trying to
// simplify this block (we would need to remove it from the scope map
// and cleanup entry).
- if (!CleanupEntries.empty())
+ if (!EHStack.empty())
return;
// Can only simplify direct branches.
@@ -221,18 +218,6 @@ void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
return;
}
- // If necessary, associate the block with the cleanup stack size.
- if (!CleanupEntries.empty()) {
- // Check if the basic block has already been inserted.
- BlockScopeMap::iterator I = BlockScopes.find(BB);
- if (I != BlockScopes.end()) {
- assert(I->second == CleanupEntries.size() - 1);
- } else {
- BlockScopes[BB] = CleanupEntries.size() - 1;
- CleanupEntries.back().Blocks.push_back(BB);
- }
- }
-
// Place the block after the current block, if possible, or else at
// the end of the function.
if (CurBB && CurBB->getParent())
@@ -259,8 +244,35 @@ void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
Builder.ClearInsertionPoint();
}
+CodeGenFunction::JumpDest
+CodeGenFunction::getJumpDestForLabel(const LabelStmt *S) {
+ JumpDest &Dest = LabelMap[S];
+ if (Dest.Block) return Dest;
+
+ // Create, but don't insert, the new block.
+ Dest.Block = createBasicBlock(S->getName());
+ Dest.ScopeDepth = EHScopeStack::stable_iterator::invalid();
+ return Dest;
+}
+
void CodeGenFunction::EmitLabel(const LabelStmt &S) {
- EmitBlock(getBasicBlockForLabel(&S));
+ JumpDest &Dest = LabelMap[&S];
+
+ // If we didn't need a forward reference to this label, just go
+ // ahead and create a destination at the current scope.
+ if (!Dest.Block) {
+ Dest = getJumpDestInCurrentScope(S.getName());
+
+ // Otherwise, we need to give this label a target depth and remove
+ // it from the branch-fixups list.
+ } else {
+ assert(!Dest.ScopeDepth.isValid() && "already emitted label!");
+ Dest.ScopeDepth = EHStack.stable_begin();
+
+ EHStack.resolveBranchFixups(Dest.Block);
+ }
+
+ EmitBlock(Dest.Block);
}
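
[Annotation] A string-based toy of the two EmitLabel cases above: a goto seen before its label creates the block and records a branch fixup, while emitting the label later assigns the real scope depth and resolves the fixups (resolveBranchFixups is modeled by clearing a list; everything here is hypothetical):

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    struct JumpDest {
      std::string block;       // empty = block not created yet
      int scopeDepth = -1;     // -1 models an invalid (forward) depth
    };

    static std::map<std::string, JumpDest> labelMap;
    static std::vector<std::string> branchFixups;

    static void emitGoto(const std::string &label) {
      JumpDest &dest = labelMap[label];
      if (dest.block.empty()) {          // forward reference: make the block
        dest.block = "label." + label;   // ...but leave the depth invalid
        branchFixups.push_back(dest.block);
      }
      std::printf("  br %s\n", dest.block.c_str());
    }

    static void emitLabel(const std::string &label, int depth) {
      JumpDest &dest = labelMap[label];
      if (dest.block.empty())            // no forward reference: easy case
        dest.block = "label." + label;
      else                               // resolve the recorded fixups
        branchFixups.clear();            // models resolveBranchFixups
      dest.scopeDepth = depth;
      std::printf("%s:   ; depth %d, %zu fixups pending\n",
                  dest.block.c_str(), dest.scopeDepth, branchFixups.size());
    }

    int main() {
      emitGoto("out");     // goto before its label: records a fixup
      emitLabel("out", 0); // emitting the label resolves the fixup
      return 0;
    }
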
@@ -276,7 +288,7 @@ void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
if (HaveInsertPoint())
EmitStopPoint(&S);
- EmitBranchThroughCleanup(getBasicBlockForLabel(S.getLabel()));
+ EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}
@@ -301,7 +313,7 @@ void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
// C99 6.8.4.1: The first substatement is executed if the expression compares
// unequal to 0. The condition must be a scalar type.
- CleanupScope ConditionScope(*this);
+ RunCleanupsScope ConditionScope(*this);
if (S.getConditionVariable())
EmitLocalBlockVarDecl(*S.getConditionVariable());
@@ -318,7 +330,7 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
// This avoids emitting dead code and simplifies the CFG substantially.
if (!ContainsLabel(Skipped)) {
if (Executed) {
- CleanupScope ExecutedScope(*this);
+ RunCleanupsScope ExecutedScope(*this);
EmitStmt(Executed);
}
return;
@@ -337,7 +349,7 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
// Emit the 'then' code.
EmitBlock(ThenBlock);
{
- CleanupScope ThenScope(*this);
+ RunCleanupsScope ThenScope(*this);
EmitStmt(S.getThen());
}
EmitBranch(ContBlock);
@@ -346,7 +358,7 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
if (const Stmt *Else = S.getElse()) {
EmitBlock(ElseBlock);
{
- CleanupScope ElseScope(*this);
+ RunCleanupsScope ElseScope(*this);
EmitStmt(Else);
}
EmitBranch(ContBlock);
@@ -357,20 +369,17 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
}
void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
- // Emit the header for the loop, insert it, which will create an uncond br to
- // it.
- llvm::BasicBlock *LoopHeader = createBasicBlock("while.cond");
- EmitBlock(LoopHeader);
-
- // Create an exit block for when the condition fails, create a block for the
- // body of the loop.
- llvm::BasicBlock *ExitBlock = createBasicBlock("while.end");
- llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
- llvm::BasicBlock *CleanupBlock = 0;
- llvm::BasicBlock *EffectiveExitBlock = ExitBlock;
+ // Emit the header for the loop, which will also become
+ // the continue target.
+ JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
+ EmitBlock(LoopHeader.Block);
+
+ // Create an exit block for when the condition fails, which will
+ // also become the break target.
+ JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
// Store the blocks to use for break and continue.
- BreakContinueStack.push_back(BreakContinue(ExitBlock, LoopHeader));
+ BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));
// C++ [stmt.while]p2:
// When the condition of a while statement is a declaration, the
@@ -379,18 +388,10 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
// [...]
// The object created in a condition is destroyed and created
// with each iteration of the loop.
- CleanupScope ConditionScope(*this);
+ RunCleanupsScope ConditionScope(*this);
- if (S.getConditionVariable()) {
+ if (S.getConditionVariable())
EmitLocalBlockVarDecl(*S.getConditionVariable());
-
- // If this condition variable requires cleanups, create a basic
- // block to handle those cleanups.
- if (ConditionScope.requiresCleanups()) {
- CleanupBlock = createBasicBlock("while.cleanup");
- EffectiveExitBlock = CleanupBlock;
- }
- }
// Evaluate the conditional in the while header. C99 6.8.5.1: The
// evaluation of the controlling expression takes place before each
@@ -405,61 +406,63 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
EmitBoolCondBranch = false;
// As long as the condition is true, go to the loop body.
- if (EmitBoolCondBranch)
- Builder.CreateCondBr(BoolCondVal, LoopBody, EffectiveExitBlock);
+ llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
+ if (EmitBoolCondBranch) {
+ llvm::BasicBlock *ExitBlock = LoopExit.Block;
+ if (ConditionScope.requiresCleanups())
+ ExitBlock = createBasicBlock("while.exit");
+
+ Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
+
+ if (ExitBlock != LoopExit.Block) {
+ EmitBlock(ExitBlock);
+ EmitBranchThroughCleanup(LoopExit);
+ }
+ }
- // Emit the loop body.
+ // Emit the loop body. We have to emit this in a cleanup scope
+ // because it might be a singleton DeclStmt.
{
- CleanupScope BodyScope(*this);
+ RunCleanupsScope BodyScope(*this);
EmitBlock(LoopBody);
EmitStmt(S.getBody());
}
BreakContinueStack.pop_back();
- if (CleanupBlock) {
- // If we have a cleanup block, jump there to perform cleanups
- // before looping.
- EmitBranch(CleanupBlock);
+ // Immediately force cleanup.
+ ConditionScope.ForceCleanup();
- // Emit the cleanup block, performing cleanups for the condition
- // and then jumping to either the loop header or the exit block.
- EmitBlock(CleanupBlock);
- ConditionScope.ForceCleanup();
- Builder.CreateCondBr(BoolCondVal, LoopHeader, ExitBlock);
- } else {
- // Cycle to the condition.
- EmitBranch(LoopHeader);
- }
+ // Branch to the loop header again.
+ EmitBranch(LoopHeader.Block);
// Emit the exit block.
- EmitBlock(ExitBlock, true);
-
+ EmitBlock(LoopExit.Block, true);
// The LoopHeader typically is just a branch if we skipped emitting
// a branch; if so, try to erase it.
- if (!EmitBoolCondBranch && !CleanupBlock)
- SimplifyForwardingBlocks(LoopHeader);
+ if (!EmitBoolCondBranch)
+ SimplifyForwardingBlocks(LoopHeader.Block);
}
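
[Annotation] The comment above quotes C++ [stmt.while]p2: the condition's object is destroyed and re-created with each iteration. A stand-alone analogue of the emitted structure, with Guard as a hypothetical condition-variable type and the basic-block names from the code above shown in comments:

    #include <cstdio>

    struct Guard {                    // the condition variable's cleanup
      ~Guard() { std::puts("condition cleanup"); }
    };

    int main() {
      int i = 0;
      while (true) {                  // while.cond
        Guard conditionVar;           // created anew on every iteration
        if (!(i < 3))                 // condition fails: leaving the scope
          break;                      // runs ~Guard, i.e. the staged exit
        std::printf("while.body i=%d\n", i);   // while.body
        ++i;
      }                               // ~Guard also runs before each branch
                                      // back to while.cond
      std::puts("while.end");
      return 0;
    }
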
void CodeGenFunction::EmitDoStmt(const DoStmt &S) {
- // Emit the body for the loop, insert it, which will create an uncond br to
- // it.
- llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
- llvm::BasicBlock *AfterDo = createBasicBlock("do.end");
- EmitBlock(LoopBody);
-
- llvm::BasicBlock *DoCond = createBasicBlock("do.cond");
+ JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
+ JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
// Store the blocks to use for break and continue.
- BreakContinueStack.push_back(BreakContinue(AfterDo, DoCond));
+ BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));
- // Emit the body of the loop into the block.
- EmitStmt(S.getBody());
+ // Emit the body of the loop.
+ llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
+ EmitBlock(LoopBody);
+ {
+ RunCleanupsScope BodyScope(*this);
+ EmitStmt(S.getBody());
+ }
BreakContinueStack.pop_back();
- EmitBlock(DoCond);
+ EmitBlock(LoopCond.Block);
// C99 6.8.5.2: "The evaluation of the controlling expression takes place
// after each execution of the loop body."
@@ -478,47 +481,49 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S) {
// As long as the condition is true, iterate the loop.
if (EmitBoolCondBranch)
- Builder.CreateCondBr(BoolCondVal, LoopBody, AfterDo);
+ Builder.CreateCondBr(BoolCondVal, LoopBody, LoopExit.Block);
// Emit the exit block.
- EmitBlock(AfterDo);
+ EmitBlock(LoopExit.Block);
// The DoCond block typically is just a branch if we skipped
// emitting a branch; if so, try to erase it.
if (!EmitBoolCondBranch)
- SimplifyForwardingBlocks(DoCond);
+ SimplifyForwardingBlocks(LoopCond.Block);
}
void CodeGenFunction::EmitForStmt(const ForStmt &S) {
- CleanupScope ForScope(*this);
+ JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
+
+ RunCleanupsScope ForScope(*this);
// Evaluate the first part before the loop.
if (S.getInit())
EmitStmt(S.getInit());
// Start the loop with a block that tests the condition.
- llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
- llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
- llvm::BasicBlock *IncBlock = 0;
- llvm::BasicBlock *CondCleanup = 0;
- llvm::BasicBlock *EffectiveExitBlock = AfterFor;
+ // If there's an increment, the continue scope will be overwritten
+ // later.
+ JumpDest Continue = getJumpDestInCurrentScope("for.cond");
+ llvm::BasicBlock *CondBlock = Continue.Block;
EmitBlock(CondBlock);
// Create a cleanup scope for the condition variable cleanups.
- CleanupScope ConditionScope(*this);
+ RunCleanupsScope ConditionScope(*this);
llvm::Value *BoolCondVal = 0;
if (S.getCond()) {
// If the for statement has a condition scope, emit the local variable
// declaration.
+ llvm::BasicBlock *ExitBlock = LoopExit.Block;
if (S.getConditionVariable()) {
EmitLocalBlockVarDecl(*S.getConditionVariable());
-
- if (ConditionScope.requiresCleanups()) {
- CondCleanup = createBasicBlock("for.cond.cleanup");
- EffectiveExitBlock = CondCleanup;
- }
}
+
+ // If there are any cleanups between here and the loop-exit scope,
+ // create a block to stage a loop exit along.
+ if (ForScope.requiresCleanups())
+ ExitBlock = createBasicBlock("for.cond.cleanup");
// As long as the condition is true, iterate the loop.
llvm::BasicBlock *ForBody = createBasicBlock("for.body");
@@ -526,7 +531,12 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
// C99 6.8.5p2/p4: The first substatement is executed if the expression
// compares unequal to 0. The condition must be a scalar type.
BoolCondVal = EvaluateExprAsBool(S.getCond());
- Builder.CreateCondBr(BoolCondVal, ForBody, EffectiveExitBlock);
+ Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock);
+
+ if (ExitBlock != LoopExit.Block) {
+ EmitBlock(ExitBlock);
+ EmitBranchThroughCleanup(LoopExit);
+ }
EmitBlock(ForBody);
} else {
@@ -535,17 +545,15 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
}
// If the for loop doesn't have an increment we can just use the
- // condition as the continue block.
- llvm::BasicBlock *ContinueBlock;
+ // condition as the continue block. Otherwise we'll need to create
+ // a block for it (in the current scope, i.e. in the scope of the
+  // condition), and that block will become our continue block.
if (S.getInc())
- ContinueBlock = IncBlock = createBasicBlock("for.inc");
- else
- ContinueBlock = CondBlock;
+ Continue = getJumpDestInCurrentScope("for.inc");
// Store the blocks to use for break and continue.
- BreakContinueStack.push_back(BreakContinue(AfterFor, ContinueBlock));
+ BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
- // If the condition is true, execute the body of the for stmt.
CGDebugInfo *DI = getDebugInfo();
if (DI) {
DI->setLocation(S.getSourceRange().getBegin());
@@ -555,37 +563,30 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
{
// Create a separate cleanup scope for the body, in case it is not
// a compound statement.
- CleanupScope BodyScope(*this);
+ RunCleanupsScope BodyScope(*this);
EmitStmt(S.getBody());
}
// If there is an increment, emit it next.
if (S.getInc()) {
- EmitBlock(IncBlock);
+ EmitBlock(Continue.Block);
EmitStmt(S.getInc());
}
BreakContinueStack.pop_back();
-
- // Finally, branch back up to the condition for the next iteration.
- if (CondCleanup) {
- // Branch to the cleanup block.
- EmitBranch(CondCleanup);
-
- // Emit the cleanup block, which branches back to the loop body or
- // outside of the for statement once it is done.
- EmitBlock(CondCleanup);
- ConditionScope.ForceCleanup();
- Builder.CreateCondBr(BoolCondVal, CondBlock, AfterFor);
- } else
- EmitBranch(CondBlock);
+
+ ConditionScope.ForceCleanup();
+ EmitBranch(CondBlock);
+
+ ForScope.ForceCleanup();
+
if (DI) {
DI->setLocation(S.getSourceRange().getEnd());
DI->EmitRegionEnd(CurFn, Builder);
}
// Emit the fall-through block.
- EmitBlock(AfterFor, true);
+ EmitBlock(LoopExit.Block, true);
}
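To see why the condition scope may force a staging exit block, consider a for statement whose condition declares a variable with a destructor (a hand-written example, not from the patch; definitions elided):

    struct Guard { ~Guard(); operator bool() const; };
    Guard make();

    void f() {
      for (int i = 0; Guard g = make(); ++i) {
        // A false condition must destroy 'g' before reaching for.end, so
        // the false edge of for.cond targets for.cond.cleanup rather than
        // jumping straight to for.end.
      }
    }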
void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
@@ -631,7 +632,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
} else if (FnRetTy->isReferenceType()) {
// If this function returns a reference, take the address of the expression
// rather than the value.
- RValue Result = EmitReferenceBindingToExpr(RV, false);
+ RValue Result = EmitReferenceBindingToExpr(RV, /*InitializedDecl=*/0);
Builder.CreateStore(Result.getScalarVal(), ReturnValue);
} else if (!hasAggregateLLVMType(RV->getType())) {
Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
@@ -666,7 +667,7 @@ void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
if (HaveInsertPoint())
EmitStopPoint(&S);
- llvm::BasicBlock *Block = BreakContinueStack.back().BreakBlock;
+ JumpDest Block = BreakContinueStack.back().BreakBlock;
EmitBranchThroughCleanup(Block);
}
@@ -679,7 +680,7 @@ void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
if (HaveInsertPoint())
EmitStopPoint(&S);
- llvm::BasicBlock *Block = BreakContinueStack.back().ContinueBlock;
+ JumpDest Block = BreakContinueStack.back().ContinueBlock;
EmitBranchThroughCleanup(Block);
}
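Routing break and continue through EmitBranchThroughCleanup matters whenever the jump leaves scopes with pending cleanups, as in this hand-written example:

    struct Lock { ~Lock(); };

    void g(bool stop) {
      while (true) {
        Lock l;
        if (stop)
          break;  // must run ~Lock() before branching to the loop exit
      }
    }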
@@ -788,7 +789,9 @@ void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
}
void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
- CleanupScope ConditionScope(*this);
+ JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
+
+ RunCleanupsScope ConditionScope(*this);
if (S.getConditionVariable())
EmitLocalBlockVarDecl(*S.getConditionVariable());
@@ -803,7 +806,6 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
// statement. We also need to create a default block now so that
// explicit case ranges tests can have a place to jump to on
// failure.
- llvm::BasicBlock *NextBlock = createBasicBlock("sw.epilog");
llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
CaseRangeBlock = DefaultBlock;
@@ -813,12 +815,11 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
  // All break statements jump to NextBlock. If BreakContinueStack is non-empty
  // then reuse the last ContinueBlock.
- llvm::BasicBlock *ContinueBlock = 0;
+ JumpDest OuterContinue;
if (!BreakContinueStack.empty())
- ContinueBlock = BreakContinueStack.back().ContinueBlock;
+ OuterContinue = BreakContinueStack.back().ContinueBlock;
- // Ensure any vlas created between there and here, are undone
- BreakContinueStack.push_back(BreakContinue(NextBlock, ContinueBlock));
+ BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
// Emit switch body.
EmitStmt(S.getBody());
@@ -829,15 +830,22 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
// been chained on top.
SwitchInsn->setSuccessor(0, CaseRangeBlock);
- // If a default was never emitted then reroute any jumps to it and
- // discard.
+ // If a default was never emitted:
if (!DefaultBlock->getParent()) {
- DefaultBlock->replaceAllUsesWith(NextBlock);
- delete DefaultBlock;
+ // If we have cleanups, emit the default block so that there's a
+ // place to jump through the cleanups from.
+ if (ConditionScope.requiresCleanups()) {
+ EmitBlock(DefaultBlock);
+
+ // Otherwise, just forward the default block to the switch end.
+ } else {
+ DefaultBlock->replaceAllUsesWith(SwitchExit.Block);
+ delete DefaultBlock;
+ }
}
// Emit continuation.
- EmitBlock(NextBlock, true);
+ EmitBlock(SwitchExit.Block, true);
SwitchInsn = SavedSwitchInsn;
CaseRangeBlock = SavedCRBlock;
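The new default-block logic matters when the switch condition declares a variable needing a cleanup: any path out of the switch, including the implicit default, must pass through that cleanup. A hand-written illustration:

    struct S { ~S(); operator int() const; };

    void h() {
      switch (S s = S()) {  // condition variable with a destructor
      case 0:
        break;              // exits through the cleanup for 's'
      }
      // With cleanups in scope, sw.default is emitted as a real block so
      // there is somewhere to jump through the cleanups from; otherwise it
      // is forwarded to sw.epilog and deleted.
    }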
@@ -1066,8 +1074,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
getContext().getTypeSize(InputTy)) {
// Use ptrtoint as appropriate so that we can do our extension.
if (isa<llvm::PointerType>(Arg->getType()))
- Arg = Builder.CreatePtrToInt(Arg,
- llvm::IntegerType::get(VMContext, LLVMPointerWidth));
+ Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
const llvm::Type *OutputTy = ConvertType(OutputType);
if (isa<llvm::IntegerType>(OutputTy))
Arg = Builder.CreateZExt(Arg, OutputTy);
@@ -1132,7 +1139,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// call.
unsigned LocID = S.getAsmString()->getLocStart().getRawEncoding();
llvm::Value *LocIDC =
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), LocID);
+ llvm::ConstantInt::get(Int32Ty, LocID);
Result->setMetadata("srcloc", llvm::MDNode::get(VMContext, &LocIDC, 1));
// Extract all of the register value results from the asm.
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp
index a8f0467..fd7c616 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp
@@ -15,14 +15,38 @@
using namespace clang;
using namespace CodeGen;
-void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary,
- llvm::Value *Ptr) {
- assert((LiveTemporaries.empty() ||
- LiveTemporaries.back().ThisPtr != Ptr ||
- ConditionalBranchLevel) &&
- "Pushed the same temporary twice; AST is likely wrong");
- llvm::BasicBlock *DtorBlock = createBasicBlock("temp.dtor");
+static void EmitTemporaryCleanup(CodeGenFunction &CGF,
+ const CXXTemporary *Temporary,
+ llvm::Value *Addr,
+ llvm::Value *CondPtr) {
+ llvm::BasicBlock *CondEnd = 0;
+
+ // If this is a conditional temporary, we need to check the condition
+ // boolean and only call the destructor if it's true.
+ if (CondPtr) {
+ llvm::BasicBlock *CondBlock = CGF.createBasicBlock("temp.cond-dtor.call");
+ CondEnd = CGF.createBasicBlock("temp.cond-dtor.cont");
+
+ llvm::Value *Cond = CGF.Builder.CreateLoad(CondPtr);
+ CGF.Builder.CreateCondBr(Cond, CondBlock, CondEnd);
+ CGF.EmitBlock(CondBlock);
+ }
+
+ CGF.EmitCXXDestructorCall(Temporary->getDestructor(),
+ Dtor_Complete, /*ForVirtualBase=*/false,
+ Addr);
+
+ if (CondPtr) {
+ // Reset the condition to false.
+ CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(CGF.getLLVMContext()),
+ CondPtr);
+ CGF.EmitBlock(CondEnd);
+ }
+}
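The CondPtr flag implements conditional temporaries: a temporary constructed on only one arm of a conditional expression must be destroyed only if it was actually built. A hand-written sketch of the control flow this helper emits:

    // Sketch of the emitted logic for a full-expression like:  b ? T() : u
    void sketch(bool b) {
      bool temp_constructed = false;   // the alloca behind CondPtr
      if (b) {
        /* construct T into its slot */
        temp_constructed = true;
      }
      /* ... finish evaluating the full-expression ... */
      if (temp_constructed) {          // temp.cond-dtor.call
        /* call ~T() */
        temp_constructed = false;      // reset so the EH path won't re-destroy
      }                                // temp.cond-dtor.cont
    }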
+/// Emits all the code to cause the given temporary to be cleaned up.
+void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
+ llvm::Value *Ptr) {
llvm::AllocaInst *CondPtr = 0;
// Check if temporaries need to be conditional. If so, we'll create a
@@ -38,82 +62,13 @@ void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary,
Builder.CreateStore(llvm::ConstantInt::getTrue(VMContext), CondPtr);
}
- LiveTemporaries.push_back(CXXLiveTemporaryInfo(Temporary, Ptr, DtorBlock,
- CondPtr));
-
- PushCleanupBlock(DtorBlock);
+ CleanupBlock Cleanup(*this, NormalCleanup);
+ EmitTemporaryCleanup(*this, Temporary, Ptr, CondPtr);
if (Exceptions) {
- const CXXLiveTemporaryInfo& Info = LiveTemporaries.back();
- llvm::BasicBlock *CondEnd = 0;
-
- EHCleanupBlock Cleanup(*this);
-
- // If this is a conditional temporary, we need to check the condition
- // boolean and only call the destructor if it's true.
- if (Info.CondPtr) {
- llvm::BasicBlock *CondBlock = createBasicBlock("cond.dtor.call");
- CondEnd = createBasicBlock("cond.dtor.end");
-
- llvm::Value *Cond = Builder.CreateLoad(Info.CondPtr);
- Builder.CreateCondBr(Cond, CondBlock, CondEnd);
- EmitBlock(CondBlock);
- }
-
- EmitCXXDestructorCall(Info.Temporary->getDestructor(),
- Dtor_Complete, /*ForVirtualBase=*/false,
- Info.ThisPtr);
-
- if (CondEnd) {
- // Reset the condition. to false.
- Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext), Info.CondPtr);
- EmitBlock(CondEnd);
- }
- }
-}
-
-void CodeGenFunction::PopCXXTemporary() {
- const CXXLiveTemporaryInfo& Info = LiveTemporaries.back();
-
- CleanupBlockInfo CleanupInfo = PopCleanupBlock();
- assert(CleanupInfo.CleanupBlock == Info.DtorBlock &&
- "Cleanup block mismatch!");
- assert(!CleanupInfo.SwitchBlock &&
- "Should not have a switch block for temporary cleanup!");
- assert(!CleanupInfo.EndBlock &&
- "Should not have an end block for temporary cleanup!");
-
- llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
- if (CurBB && !CurBB->getTerminator() &&
- Info.DtorBlock->getNumUses() == 0) {
- CurBB->getInstList().splice(CurBB->end(), Info.DtorBlock->getInstList());
- delete Info.DtorBlock;
- } else
- EmitBlock(Info.DtorBlock);
-
- llvm::BasicBlock *CondEnd = 0;
-
- // If this is a conditional temporary, we need to check the condition
- // boolean and only call the destructor if it's true.
- if (Info.CondPtr) {
- llvm::BasicBlock *CondBlock = createBasicBlock("cond.dtor.call");
- CondEnd = createBasicBlock("cond.dtor.end");
-
- llvm::Value *Cond = Builder.CreateLoad(Info.CondPtr);
- Builder.CreateCondBr(Cond, CondBlock, CondEnd);
- EmitBlock(CondBlock);
- }
-
- EmitCXXDestructorCall(Info.Temporary->getDestructor(),
- Dtor_Complete, /*ForVirtualBase=*/false, Info.ThisPtr);
-
- if (CondEnd) {
- // Reset the condition. to false.
- Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext), Info.CondPtr);
- EmitBlock(CondEnd);
+ Cleanup.beginEHCleanup();
+ EmitTemporaryCleanup(*this, Temporary, Ptr, CondPtr);
}
-
- LiveTemporaries.pop_back();
}
RValue
@@ -121,40 +76,23 @@ CodeGenFunction::EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
llvm::Value *AggLoc,
bool IsAggLocVolatile,
bool IsInitializer) {
- // Keep track of the current cleanup stack depth.
- size_t CleanupStackDepth = CleanupEntries.size();
- (void) CleanupStackDepth;
-
RValue RV;
-
{
- CXXTemporariesCleanupScope Scope(*this);
+ RunCleanupsScope Scope(*this);
RV = EmitAnyExpr(E->getSubExpr(), AggLoc, IsAggLocVolatile,
/*IgnoreResult=*/false, IsInitializer);
}
- assert(CleanupEntries.size() == CleanupStackDepth &&
- "Cleanup size mismatch!");
-
return RV;
}
LValue CodeGenFunction::EmitCXXExprWithTemporariesLValue(
const CXXExprWithTemporaries *E) {
- // Keep track of the current cleanup stack depth.
- size_t CleanupStackDepth = CleanupEntries.size();
- (void) CleanupStackDepth;
-
- unsigned OldNumLiveTemporaries = LiveTemporaries.size();
-
- LValue LV = EmitLValue(E->getSubExpr());
-
- // Pop temporaries.
- while (LiveTemporaries.size() > OldNumLiveTemporaries)
- PopCXXTemporary();
-
- assert(CleanupEntries.size() == CleanupStackDepth &&
- "Cleanup size mismatch!");
+ LValue LV;
+ {
+ RunCleanupsScope Scope(*this);
+ LV = EmitLValue(E->getSubExpr());
+ }
return LV;
}
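Both entry points now lean on RunCleanupsScope, an RAII helper that records the cleanup-stack depth on construction and pops back to it on destruction (or earlier, via ForceCleanup). A simplified sketch of the pattern; the real class lives in CodeGenFunction.h and differs in detail:

    class RunCleanupsScopeSketch {
      CodeGenFunction &CGF;
      EHScopeStack::stable_iterator Depth;  // depth at scope entry
      bool Done;
    public:
      explicit RunCleanupsScopeSketch(CodeGenFunction &cgf)
        : CGF(cgf), Depth(cgf.EHStack.stable_begin()), Done(false) {}
      void ForceCleanup() { CGF.PopCleanupBlocks(Depth); Done = true; }
      ~RunCleanupsScopeSketch() { if (!Done) ForceCleanup(); }
    };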
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
index 0f023e6..6abac26 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
@@ -87,112 +87,61 @@ private:
/// MostDerivedClassLayout - the AST record layout of the most derived class.
const ASTRecordLayout &MostDerivedClassLayout;
- /// BaseSubobjectMethodPairTy - Uniquely identifies a member function
+ /// MethodBaseOffsetPairTy - Uniquely identifies a member function
/// in a base subobject.
- typedef std::pair<BaseSubobject, const CXXMethodDecl *>
- BaseSubobjectMethodPairTy;
-
- typedef llvm::DenseMap<BaseSubobjectMethodPairTy,
+ typedef std::pair<const CXXMethodDecl *, uint64_t> MethodBaseOffsetPairTy;
+
+ typedef llvm::DenseMap<MethodBaseOffsetPairTy,
OverriderInfo> OverridersMapTy;
/// OverridersMap - The final overriders for all virtual member functions of
/// all the base subobjects of the most derived class.
OverridersMapTy OverridersMap;
- /// VisitedVirtualBases - A set of all the visited virtual bases, used to
- /// avoid visiting virtual bases more than once.
- llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;
+  /// SubobjectOffsetMapTy - A mapping from a base subobject (represented
+  /// as a record decl and a subobject number) to its offsets in the most
+  /// derived class as well as the layout class.
+ typedef llvm::DenseMap<std::pair<const CXXRecordDecl *, unsigned>,
+ uint64_t> SubobjectOffsetMapTy;
- typedef llvm::DenseMap<BaseSubobjectMethodPairTy, BaseOffset>
- AdjustmentOffsetsMapTy;
-
- /// ReturnAdjustments - Holds return adjustments for all the overriders that
- /// need to perform return value adjustments.
- AdjustmentOffsetsMapTy ReturnAdjustments;
-
- // FIXME: We might be able to get away with making this a SmallSet.
- typedef llvm::SmallSetVector<uint64_t, 2> OffsetSetVectorTy;
-
- /// SubobjectOffsetsMapTy - This map is used for keeping track of all the
- /// base subobject offsets that a single class declaration might refer to.
- ///
- /// For example, in:
- ///
- /// struct A { virtual void f(); };
- /// struct B1 : A { };
- /// struct B2 : A { };
- /// struct C : B1, B2 { virtual void f(); };
- ///
- /// when we determine that C::f() overrides A::f(), we need to update the
- /// overriders map for both A-in-B1 and A-in-B2 and the subobject offsets map
- /// will have the subobject offsets for both A copies.
- typedef llvm::DenseMap<const CXXRecordDecl *, OffsetSetVectorTy>
- SubobjectOffsetsMapTy;
-
- /// ComputeFinalOverriders - Compute the final overriders for a given base
- /// subobject (and all its direct and indirect bases).
- void ComputeFinalOverriders(BaseSubobject Base,
- bool BaseSubobjectIsVisitedVBase,
- uint64_t OffsetInLayoutClass,
- SubobjectOffsetsMapTy &Offsets);
+ typedef llvm::DenseMap<const CXXRecordDecl *, unsigned> SubobjectCountMapTy;
- /// AddOverriders - Add the final overriders for this base subobject to the
- /// map of final overriders.
- void AddOverriders(BaseSubobject Base, uint64_t OffsetInLayoutClass,
- SubobjectOffsetsMapTy &Offsets);
+ /// ComputeBaseOffsets - Compute the offsets for all base subobjects of the
+ /// given base.
+ void ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
+ uint64_t OffsetInLayoutClass,
+ SubobjectOffsetMapTy &SubobjectOffsets,
+ SubobjectOffsetMapTy &SubobjectLayoutClassOffsets,
+ SubobjectCountMapTy &SubobjectCounts);
- /// PropagateOverrider - Propagate the NewMD overrider to all the functions
- /// that OldMD overrides. For example, if we have:
- ///
- /// struct A { virtual void f(); };
- /// struct B : A { virtual void f(); };
- /// struct C : B { virtual void f(); };
- ///
- /// and we want to override B::f with C::f, we also need to override A::f with
- /// C::f.
- void PropagateOverrider(const CXXMethodDecl *OldMD,
- BaseSubobject NewBase,
- uint64_t OverriderOffsetInLayoutClass,
- const CXXMethodDecl *NewMD,
- SubobjectOffsetsMapTy &Offsets);
+ typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
+
+ /// dump - dump the final overriders for a base subobject, and all its direct
+ /// and indirect base subobjects.
+ void dump(llvm::raw_ostream &Out, BaseSubobject Base,
+ VisitedVirtualBasesSetTy& VisitedVirtualBases);
- static void MergeSubobjectOffsets(const SubobjectOffsetsMapTy &NewOffsets,
- SubobjectOffsetsMapTy &Offsets);
-
public:
FinalOverriders(const CXXRecordDecl *MostDerivedClass,
uint64_t MostDerivedClassOffset,
const CXXRecordDecl *LayoutClass);
/// getOverrider - Get the final overrider for the given method declaration in
- /// the given base subobject.
- OverriderInfo getOverrider(BaseSubobject Base,
- const CXXMethodDecl *MD) const {
- assert(OverridersMap.count(std::make_pair(Base, MD)) &&
+ /// the subobject with the given base offset.
+ OverriderInfo getOverrider(const CXXMethodDecl *MD,
+ uint64_t BaseOffset) const {
+ assert(OverridersMap.count(std::make_pair(MD, BaseOffset)) &&
"Did not find overrider!");
- return OverridersMap.lookup(std::make_pair(Base, MD));
+ return OverridersMap.lookup(std::make_pair(MD, BaseOffset));
}
- /// getReturnAdjustmentOffset - Get the return adjustment offset for the
- /// method decl in the given base subobject. Returns an empty base offset if
- /// no adjustment is needed.
- BaseOffset getReturnAdjustmentOffset(BaseSubobject Base,
- const CXXMethodDecl *MD) const {
- return ReturnAdjustments.lookup(std::make_pair(Base, MD));
- }
-
/// dump - dump the final overriders.
void dump() {
- assert(VisitedVirtualBases.empty() &&
- "Visited virtual bases aren't empty!");
- dump(llvm::errs(), BaseSubobject(MostDerivedClass, 0));
- VisitedVirtualBases.clear();
+ VisitedVirtualBasesSetTy VisitedVirtualBases;
+ dump(llvm::errs(), BaseSubobject(MostDerivedClass, 0), VisitedVirtualBases);
}
- /// dump - dump the final overriders for a base subobject, and all its direct
- /// and indirect base subobjects.
- void dump(llvm::raw_ostream &Out, BaseSubobject Base);
};
#define DUMP_OVERRIDERS 0
@@ -204,54 +153,57 @@ FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass,
MostDerivedClassOffset(MostDerivedClassOffset), LayoutClass(LayoutClass),
Context(MostDerivedClass->getASTContext()),
MostDerivedClassLayout(Context.getASTRecordLayout(MostDerivedClass)) {
-
- // Compute the final overriders.
- SubobjectOffsetsMapTy Offsets;
- ComputeFinalOverriders(BaseSubobject(MostDerivedClass, 0),
- /*BaseSubobjectIsVisitedVBase=*/false,
- MostDerivedClassOffset, Offsets);
- VisitedVirtualBases.clear();
-#if DUMP_OVERRIDERS
- // And dump them (for now).
- dump();
-
- // Also dump the base offsets (for now).
- for (SubobjectOffsetsMapTy::const_iterator I = Offsets.begin(),
- E = Offsets.end(); I != E; ++I) {
- const OffsetSetVectorTy& OffsetSetVector = I->second;
+ // Compute base offsets.
+ SubobjectOffsetMapTy SubobjectOffsets;
+ SubobjectOffsetMapTy SubobjectLayoutClassOffsets;
+ SubobjectCountMapTy SubobjectCounts;
+ ComputeBaseOffsets(BaseSubobject(MostDerivedClass, 0), /*IsVirtual=*/false,
+ MostDerivedClassOffset, SubobjectOffsets,
+ SubobjectLayoutClassOffsets, SubobjectCounts);
- llvm::errs() << "Base offsets for ";
- llvm::errs() << I->first->getQualifiedNameAsString() << '\n';
+  // Get the final overriders.
+ CXXFinalOverriderMap FinalOverriders;
+ MostDerivedClass->getFinalOverriders(FinalOverriders);
- for (unsigned I = 0, E = OffsetSetVector.size(); I != E; ++I)
- llvm::errs() << " " << I << " - " << OffsetSetVector[I] / 8 << '\n';
+ for (CXXFinalOverriderMap::const_iterator I = FinalOverriders.begin(),
+ E = FinalOverriders.end(); I != E; ++I) {
+ const CXXMethodDecl *MD = I->first;
+ const OverridingMethods& Methods = I->second;
+
+ for (OverridingMethods::const_iterator I = Methods.begin(),
+ E = Methods.end(); I != E; ++I) {
+ unsigned SubobjectNumber = I->first;
+ assert(SubobjectOffsets.count(std::make_pair(MD->getParent(),
+ SubobjectNumber)) &&
+ "Did not find subobject offset!");
+
+ uint64_t BaseOffset = SubobjectOffsets[std::make_pair(MD->getParent(),
+ SubobjectNumber)];
+
+ assert(I->second.size() == 1 && "Final overrider is not unique!");
+ const UniqueVirtualMethod &Method = I->second.front();
+
+ const CXXRecordDecl *OverriderRD = Method.Method->getParent();
+ assert(SubobjectLayoutClassOffsets.count(
+ std::make_pair(OverriderRD, Method.Subobject))
+ && "Did not find subobject offset!");
+ uint64_t OverriderOffset =
+ SubobjectLayoutClassOffsets[std::make_pair(OverriderRD,
+ Method.Subobject)];
+
+ OverriderInfo& Overrider = OverridersMap[std::make_pair(MD, BaseOffset)];
+ assert(!Overrider.Method && "Overrider should not exist yet!");
+
+ Overrider.Offset = OverriderOffset;
+ Overrider.Method = Method.Method;
+ }
}
-#endif
-}
-
-void FinalOverriders::AddOverriders(BaseSubobject Base,
- uint64_t OffsetInLayoutClass,
- SubobjectOffsetsMapTy &Offsets) {
- const CXXRecordDecl *RD = Base.getBase();
-
- for (CXXRecordDecl::method_iterator I = RD->method_begin(),
- E = RD->method_end(); I != E; ++I) {
- const CXXMethodDecl *MD = *I;
-
- if (!MD->isVirtual())
- continue;
- // First, propagate the overrider.
- PropagateOverrider(MD, Base, OffsetInLayoutClass, MD, Offsets);
-
- // Add the overrider as the final overrider of itself.
- OverriderInfo& Overrider = OverridersMap[std::make_pair(Base, MD)];
- assert(!Overrider.Method && "Overrider should not exist yet!");
-
- Overrider.Offset = OffsetInLayoutClass;
- Overrider.Method = MD;
- }
+#if DUMP_OVERRIDERS
+ // And dump them (for now).
+ dump();
+#endif
}
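The switch to (method, base offset) keys can be seen on the classic repeated-base example (hand-written; the offset values assume an 8-byte vptr per A subobject on a 64-bit target, which is only illustrative):

    struct A { virtual void f(); };
    struct B1 : A { };
    struct B2 : A { };
    struct C : B1, B2 { void f(); };

    // getFinalOverriders() reports C::f as the final overrider of A::f in
    // two distinct A subobjects, so OverridersMap ends up with two entries,
    // e.g. (A::f, 0) and (A::f, 8), both resolving to C::f.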
static BaseOffset ComputeBaseOffset(ASTContext &Context,
@@ -365,153 +317,64 @@ ComputeReturnAdjustmentBaseOffset(ASTContext &Context,
return ComputeBaseOffset(Context, BaseRD, DerivedRD);
}
-void FinalOverriders::PropagateOverrider(const CXXMethodDecl *OldMD,
- BaseSubobject NewBase,
- uint64_t OverriderOffsetInLayoutClass,
- const CXXMethodDecl *NewMD,
- SubobjectOffsetsMapTy &Offsets) {
- for (CXXMethodDecl::method_iterator I = OldMD->begin_overridden_methods(),
- E = OldMD->end_overridden_methods(); I != E; ++I) {
- const CXXMethodDecl *OverriddenMD = *I;
- const CXXRecordDecl *OverriddenRD = OverriddenMD->getParent();
-
- // We want to override OverriddenMD in all subobjects, for example:
- //
- /// struct A { virtual void f(); };
- /// struct B1 : A { };
- /// struct B2 : A { };
- /// struct C : B1, B2 { virtual void f(); };
- ///
- /// When overriding A::f with C::f we need to do so in both A subobjects.
- const OffsetSetVectorTy &OffsetVector = Offsets[OverriddenRD];
-
- // Go through all the subobjects.
- for (unsigned I = 0, E = OffsetVector.size(); I != E; ++I) {
- uint64_t Offset = OffsetVector[I];
-
- BaseSubobject OverriddenSubobject = BaseSubobject(OverriddenRD, Offset);
- BaseSubobjectMethodPairTy SubobjectAndMethod =
- std::make_pair(OverriddenSubobject, OverriddenMD);
-
- OverriderInfo &Overrider = OverridersMap[SubobjectAndMethod];
-
- assert(Overrider.Method && "Did not find existing overrider!");
-
- // Check if we need return adjustments or base adjustments.
- // (We don't want to do this for pure virtual member functions).
- if (!NewMD->isPure()) {
- // Get the return adjustment base offset.
- BaseOffset ReturnBaseOffset =
- ComputeReturnAdjustmentBaseOffset(Context, NewMD, OverriddenMD);
-
- if (!ReturnBaseOffset.isEmpty()) {
- // Store the return adjustment base offset.
- ReturnAdjustments[SubobjectAndMethod] = ReturnBaseOffset;
- }
- }
-
- // Set the new overrider.
- Overrider.Offset = OverriderOffsetInLayoutClass;
- Overrider.Method = NewMD;
-
- // And propagate it further.
- PropagateOverrider(OverriddenMD, NewBase, OverriderOffsetInLayoutClass,
- NewMD, Offsets);
- }
- }
-}
-
void
-FinalOverriders::MergeSubobjectOffsets(const SubobjectOffsetsMapTy &NewOffsets,
- SubobjectOffsetsMapTy &Offsets) {
- // Iterate over the new offsets.
- for (SubobjectOffsetsMapTy::const_iterator I = NewOffsets.begin(),
- E = NewOffsets.end(); I != E; ++I) {
- const CXXRecordDecl *NewRD = I->first;
- const OffsetSetVectorTy& NewOffsetVector = I->second;
-
- OffsetSetVectorTy &OffsetVector = Offsets[NewRD];
-
- // Merge the new offsets set vector into the old.
- OffsetVector.insert(NewOffsetVector.begin(), NewOffsetVector.end());
- }
-}
-
-void FinalOverriders::ComputeFinalOverriders(BaseSubobject Base,
- bool BaseSubobjectIsVisitedVBase,
- uint64_t OffsetInLayoutClass,
- SubobjectOffsetsMapTy &Offsets) {
+FinalOverriders::ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
+ uint64_t OffsetInLayoutClass,
+ SubobjectOffsetMapTy &SubobjectOffsets,
+ SubobjectOffsetMapTy &SubobjectLayoutClassOffsets,
+ SubobjectCountMapTy &SubobjectCounts) {
const CXXRecordDecl *RD = Base.getBase();
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- SubobjectOffsetsMapTy NewOffsets;
+ unsigned SubobjectNumber = 0;
+ if (!IsVirtual)
+ SubobjectNumber = ++SubobjectCounts[RD];
+
+ // Set up the subobject to offset mapping.
+ assert(!SubobjectOffsets.count(std::make_pair(RD, SubobjectNumber))
+ && "Subobject offset already exists!");
+ assert(!SubobjectLayoutClassOffsets.count(std::make_pair(RD, SubobjectNumber))
+ && "Subobject offset already exists!");
+
+ SubobjectOffsets[std::make_pair(RD, SubobjectNumber)] =
+ Base.getBaseOffset();
+ SubobjectLayoutClassOffsets[std::make_pair(RD, SubobjectNumber)] =
+ OffsetInLayoutClass;
+ // Traverse our bases.
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
E = RD->bases_end(); I != E; ++I) {
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- // Ignore bases that don't have any virtual member functions.
- if (!BaseDecl->isPolymorphic())
- continue;
-
- bool IsVisitedVirtualBase = BaseSubobjectIsVisitedVBase;
+
uint64_t BaseOffset;
uint64_t BaseOffsetInLayoutClass;
if (I->isVirtual()) {
- if (!VisitedVirtualBases.insert(BaseDecl))
- IsVisitedVirtualBase = true;
- BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
-
+ // Check if we've visited this virtual base before.
+ if (SubobjectOffsets.count(std::make_pair(BaseDecl, 0)))
+ continue;
+
const ASTRecordLayout &LayoutClassLayout =
Context.getASTRecordLayout(LayoutClass);
+
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
BaseOffsetInLayoutClass =
LayoutClassLayout.getVBaseClassOffset(BaseDecl);
} else {
- BaseOffset = Layout.getBaseClassOffset(BaseDecl) + Base.getBaseOffset();
- BaseOffsetInLayoutClass = Layout.getBaseClassOffset(BaseDecl) +
- OffsetInLayoutClass;
- }
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ uint64_t Offset = Layout.getBaseClassOffset(BaseDecl);
- // Compute the final overriders for this base.
- // We always want to compute the final overriders, even if the base is a
- // visited virtual base. Consider:
- //
- // struct A {
- // virtual void f();
- // virtual void g();
- // };
- //
- // struct B : virtual A {
- // void f();
- // };
- //
- // struct C : virtual A {
- // void g ();
- // };
- //
- // struct D : B, C { };
- //
- // Here, we still want to compute the overriders for A as a base of C,
- // because otherwise we'll miss that C::g overrides A::f.
- ComputeFinalOverriders(BaseSubobject(BaseDecl, BaseOffset),
- IsVisitedVirtualBase, BaseOffsetInLayoutClass,
- NewOffsets);
- }
-
- /// Now add the overriders for this particular subobject.
- /// (We don't want to do this more than once for a virtual base).
- if (!BaseSubobjectIsVisitedVBase)
- AddOverriders(Base, OffsetInLayoutClass, NewOffsets);
-
- // And merge the newly discovered subobject offsets.
- MergeSubobjectOffsets(NewOffsets, Offsets);
-
- /// Finally, add the offset for our own subobject.
- Offsets[RD].insert(Base.getBaseOffset());
+ BaseOffset = Base.getBaseOffset() + Offset;
+ BaseOffsetInLayoutClass = OffsetInLayoutClass + Offset;
+ }
+
+ ComputeBaseOffsets(BaseSubobject(BaseDecl, BaseOffset), I->isVirtual(),
+ BaseOffsetInLayoutClass, SubobjectOffsets,
+ SubobjectLayoutClassOffsets, SubobjectCounts);
+ }
}
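Note the numbering convention: a virtual base always gets subobject number 0, while each non-virtual occurrence of a class gets a fresh count; this is also what lets SubobjectOffsets.count(std::make_pair(BaseDecl, 0)) detect an already-visited virtual base. A hand-written example:

    struct A { virtual void f(); };
    struct B1 : A { };          // contributes A #1 (non-virtual)
    struct B2 : A { };          // contributes A #2 (non-virtual)
    struct V : virtual A { };   // contributes A #0 (virtual)
    struct D : B1, B2, V { };   // A occurs as subobjects #1, #2, and #0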
-void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base) {
+void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base,
+ VisitedVirtualBasesSetTy &VisitedVirtualBases) {
const CXXRecordDecl *RD = Base.getBase();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
@@ -537,7 +400,7 @@ void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base) {
Base.getBaseOffset();
}
- dump(Out, BaseSubobject(BaseDecl, BaseOffset));
+ dump(Out, BaseSubobject(BaseDecl, BaseOffset), VisitedVirtualBases);
}
Out << "Final overriders for (" << RD->getQualifiedNameAsString() << ", ";
@@ -551,17 +414,17 @@ void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base) {
if (!MD->isVirtual())
continue;
- OverriderInfo Overrider = getOverrider(Base, MD);
+ OverriderInfo Overrider = getOverrider(MD, Base.getBaseOffset());
Out << " " << MD->getQualifiedNameAsString() << " - (";
Out << Overrider.Method->getQualifiedNameAsString();
Out << ", " << ", " << Overrider.Offset / 8 << ')';
- AdjustmentOffsetsMapTy::const_iterator AI =
- ReturnAdjustments.find(std::make_pair(Base, MD));
- if (AI != ReturnAdjustments.end()) {
- const BaseOffset &Offset = AI->second;
+ BaseOffset Offset;
+ if (!Overrider.Method->isPure())
+ Offset = ComputeReturnAdjustmentBaseOffset(Context, Overrider.Method, MD);
+ if (!Offset.isEmpty()) {
Out << " [ret-adj: ";
if (Offset.VirtualBase)
Out << Offset.VirtualBase->getQualifiedNameAsString() << " vbase, ";
@@ -1013,7 +876,7 @@ void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
if (Overriders) {
// Get the final overrider.
FinalOverriders::OverriderInfo Overrider =
- Overriders->getOverrider(Base, MD);
+ Overriders->getOverrider(MD, Base.getBaseOffset());
/// The vcall offset is the offset from the virtual base to the object
/// where the function was overridden.
@@ -1390,8 +1253,7 @@ void VTableBuilder::ComputeThisAdjustments() {
// Get the final overrider for this method.
FinalOverriders::OverriderInfo Overrider =
- Overriders.getOverrider(BaseSubobject(MD->getParent(),
- MethodInfo.BaseOffset), MD);
+ Overriders.getOverrider(MD, MethodInfo.BaseOffset);
// Check if we need an adjustment at all.
if (MethodInfo.BaseOffsetInLayoutClass == Overrider.Offset) {
@@ -1763,7 +1625,7 @@ VTableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
// Get the final overrider.
FinalOverriders::OverriderInfo Overrider =
- Overriders.getOverrider(Base, MD);
+ Overriders.getOverrider(MD, Base.getBaseOffset());
// Check if this virtual member function overrides a method in a primary
// base. If this is the case, and the return type doesn't require adjustment
@@ -1828,8 +1690,12 @@ VTableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
}
// Check if this overrider needs a return adjustment.
- BaseOffset ReturnAdjustmentOffset =
- Overriders.getReturnAdjustmentOffset(Base, MD);
+ // We don't want to do this for pure virtual member functions.
+ BaseOffset ReturnAdjustmentOffset;
+ if (!OverriderMD->isPure()) {
+ ReturnAdjustmentOffset =
+ ComputeReturnAdjustmentBaseOffset(Context, OverriderMD, MD);
+ }
ReturnAdjustment ReturnAdjustment =
ComputeReturnAdjustment(ReturnAdjustmentOffset);
@@ -2775,7 +2641,7 @@ void CodeGenVTables::EmitThunks(GlobalDecl GD)
const CXXRecordDecl *RD = MD->getParent();
// Compute VTable related info for this class.
- ComputeVTableRelatedInformation(RD);
+ ComputeVTableRelatedInformation(RD, false);
ThunksMapTy::const_iterator I = Thunks.find(MD);
if (I == Thunks.end()) {
@@ -2788,24 +2654,30 @@ void CodeGenVTables::EmitThunks(GlobalDecl GD)
EmitThunk(GD, ThunkInfoVector[I]);
}
-void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD) {
- uint64_t *&LayoutData = VTableLayoutMap[RD];
+void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD,
+ bool RequireVTable) {
+ VTableLayoutData &Entry = VTableLayoutMap[RD];
+
+ // We may need to generate a definition for this vtable.
+ if (RequireVTable && !Entry.getInt()) {
+ if (!isKeyFunctionInAnotherTU(CGM.getContext(), RD) &&
+ RD->getTemplateSpecializationKind()
+ != TSK_ExplicitInstantiationDeclaration)
+ CGM.DeferredVTables.push_back(RD);
+
+ Entry.setInt(true);
+ }
// Check if we've computed this information before.
- if (LayoutData)
+ if (Entry.getPointer())
return;
- // We may need to generate a definition for this vtable.
- if (!isKeyFunctionInAnotherTU(CGM.getContext(), RD) &&
- RD->getTemplateSpecializationKind()
- != TSK_ExplicitInstantiationDeclaration)
- CGM.DeferredVTables.push_back(RD);
-
VTableBuilder Builder(*this, RD, 0, /*MostDerivedClassIsVirtual=*/0, RD);
// Add the VTable layout.
uint64_t NumVTableComponents = Builder.getNumVTableComponents();
- LayoutData = new uint64_t[NumVTableComponents + 1];
+ uint64_t *LayoutData = new uint64_t[NumVTableComponents + 1];
+ Entry.setPointer(LayoutData);
// Store the number of components.
LayoutData[0] = NumVTableComponents;
@@ -3020,7 +2892,7 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
CGM.getMangleContext().mangleCXXVTable(RD, OutName);
llvm::StringRef Name = OutName.str();
- ComputeVTableRelatedInformation(RD);
+ ComputeVTableRelatedInformation(RD, true);
const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
llvm::ArrayType *ArrayType =
@@ -3054,6 +2926,9 @@ CodeGenVTables::EmitVTableDefinition(llvm::GlobalVariable *VTable,
// Set the correct linkage.
VTable->setLinkage(Linkage);
+
+ // Set the right visibility.
+ CGM.setGlobalVisibility(VTable, RD);
}
llvm::GlobalVariable *
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h
index e55377f..abcafd6 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h
@@ -207,8 +207,12 @@ class CodeGenVTables {
/// Thunks - Contains all thunks that a given method decl will need.
ThunksMapTy Thunks;
-
- typedef llvm::DenseMap<const CXXRecordDecl *, uint64_t *> VTableLayoutMapTy;
+
+ // The layout entry and a bool indicating whether we've actually emitted
+ // the vtable.
+ typedef llvm::PointerIntPair<uint64_t *, 1, bool> VTableLayoutData;
+ typedef llvm::DenseMap<const CXXRecordDecl *, VTableLayoutData>
+ VTableLayoutMapTy;
/// VTableLayoutMap - Stores the vtable layout for all record decls.
/// The layout is stored as an array of 64-bit integers, where the first
@@ -237,13 +241,13 @@ class CodeGenVTables {
uint64_t getNumVTableComponents(const CXXRecordDecl *RD) const {
assert(VTableLayoutMap.count(RD) && "No vtable layout for this class!");
- return VTableLayoutMap.lookup(RD)[0];
+ return VTableLayoutMap.lookup(RD).getPointer()[0];
}
const uint64_t *getVTableComponentsData(const CXXRecordDecl *RD) const {
assert(VTableLayoutMap.count(RD) && "No vtable layout for this class!");
- uint64_t *Components = VTableLayoutMap.lookup(RD);
+ uint64_t *Components = VTableLayoutMap.lookup(RD).getPointer();
return &Components[1];
}
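llvm::PointerIntPair packs the integer into the low (alignment) bits of the pointer, so tracking the emitted flag costs no extra storage per entry. A minimal, self-contained usage sketch:

    #include "llvm/ADT/PointerIntPair.h"
    #include <cstdint>

    void demo() {
      llvm::PointerIntPair<uint64_t *, 1, bool> Entry;
      Entry.setPointer(new uint64_t[4]);  // layout data, as in VTableLayoutData
      Entry.setInt(true);                 // "vtable definition requested"
      uint64_t *Data = Entry.getPointer();
      bool Requested = Entry.getInt();
      (void)Data; (void)Requested;
      delete [] Entry.getPointer();
    }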
@@ -275,7 +279,8 @@ class CodeGenVTables {
/// ComputeVTableRelatedInformation - Compute and store all vtable related
/// information (vtable layout, vbase offset offsets, thunks etc) for the
/// given record decl.
- void ComputeVTableRelatedInformation(const CXXRecordDecl *RD);
+ void ComputeVTableRelatedInformation(const CXXRecordDecl *RD,
+ bool VTableRequired);
/// CreateVTableInitializer - Create a vtable initializer for the given record
/// decl.
@@ -296,7 +301,7 @@ public:
const CXXRecordDecl *RD) {
assert (RD->isDynamicClass() && "Non dynamic classes have no key.");
const CXXMethodDecl *KeyFunction = Context.getKeyFunction(RD);
- return KeyFunction && !KeyFunction->getBody();
+ return KeyFunction && !KeyFunction->hasBody();
}
/// needsVTTParameter - Return whether the given global decl needs a VTT
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt b/contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt
index a226400..b5a2329 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt
@@ -1,6 +1,7 @@
set(LLVM_NO_RTTI 1)
add_clang_library(clangCodeGen
+ BackendUtil.cpp
CGBlocks.cpp
CGBuiltin.cpp
CGCall.cpp
@@ -25,13 +26,16 @@ add_clang_library(clangCodeGen
CGTemporaries.cpp
CGVTables.cpp
CGVTT.cpp
+ CodeGenAction.cpp
CodeGenFunction.cpp
CodeGenModule.cpp
CodeGenTypes.cpp
ItaniumCXXABI.cpp
Mangle.cpp
+ MicrosoftCXXABI.cpp
ModuleBuilder.cpp
TargetInfo.cpp
)
-add_dependencies(clangCodeGen ClangStmtNodes)
+add_dependencies(clangCodeGen ClangAttrClasses ClangAttrList ClangDeclNodes
+ ClangStmtNodes)
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp
new file mode 100644
index 0000000..51c55a1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp
@@ -0,0 +1,348 @@
+//===--- CodeGenAction.cpp - LLVM Code Generation Frontend Action ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/CodeGenAction.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/CodeGen/BackendUtil.h"
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/IRReader.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/Timer.h"
+using namespace clang;
+using namespace llvm;
+
+namespace {
+ class BackendConsumer : public ASTConsumer {
+ Diagnostic &Diags;
+ BackendAction Action;
+ const CodeGenOptions &CodeGenOpts;
+ const TargetOptions &TargetOpts;
+ llvm::raw_ostream *AsmOutStream;
+ ASTContext *Context;
+
+ Timer LLVMIRGeneration;
+
+ llvm::OwningPtr<CodeGenerator> Gen;
+
+ llvm::OwningPtr<llvm::Module> TheModule;
+
+ public:
+ BackendConsumer(BackendAction action, Diagnostic &_Diags,
+ const CodeGenOptions &compopts,
+ const TargetOptions &targetopts, bool TimePasses,
+ const std::string &infile, llvm::raw_ostream *OS,
+ LLVMContext &C) :
+ Diags(_Diags),
+ Action(action),
+ CodeGenOpts(compopts),
+ TargetOpts(targetopts),
+ AsmOutStream(OS),
+ LLVMIRGeneration("LLVM IR Generation Time"),
+ Gen(CreateLLVMCodeGen(Diags, infile, compopts, C)) {
+ llvm::TimePassesIsEnabled = TimePasses;
+ }
+
+ llvm::Module *takeModule() { return TheModule.take(); }
+
+ virtual void Initialize(ASTContext &Ctx) {
+ Context = &Ctx;
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.startTimer();
+
+ Gen->Initialize(Ctx);
+
+ TheModule.reset(Gen->GetModule());
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.stopTimer();
+ }
+
+ virtual void HandleTopLevelDecl(DeclGroupRef D) {
+ PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(),
+ Context->getSourceManager(),
+ "LLVM IR generation of declaration");
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.startTimer();
+
+ Gen->HandleTopLevelDecl(D);
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.stopTimer();
+ }
+
+ virtual void HandleTranslationUnit(ASTContext &C) {
+ {
+ PrettyStackTraceString CrashInfo("Per-file LLVM IR generation");
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.startTimer();
+
+ Gen->HandleTranslationUnit(C);
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.stopTimer();
+ }
+
+ // Silently ignore if we weren't initialized for some reason.
+ if (!TheModule)
+ return;
+
+ // Make sure IR generation is happy with the module. This is released by
+ // the module provider.
+ Module *M = Gen->ReleaseModule();
+ if (!M) {
+        // The module has been released by IR gen on failures; do not double
+ // free.
+ TheModule.take();
+ return;
+ }
+
+ assert(TheModule.get() == M &&
+ "Unexpected module change during IR generation");
+
+ // Install an inline asm handler so that diagnostics get printed through
+ // our diagnostics hooks.
+ LLVMContext &Ctx = TheModule->getContext();
+ void *OldHandler = Ctx.getInlineAsmDiagnosticHandler();
+ void *OldContext = Ctx.getInlineAsmDiagnosticContext();
+ Ctx.setInlineAsmDiagnosticHandler((void*)(intptr_t)InlineAsmDiagHandler,
+ this);
+
+ EmitBackendOutput(Diags, CodeGenOpts, TargetOpts,
+ TheModule.get(), Action, AsmOutStream);
+
+ Ctx.setInlineAsmDiagnosticHandler(OldHandler, OldContext);
+ }
+
+ virtual void HandleTagDeclDefinition(TagDecl *D) {
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ Context->getSourceManager(),
+ "LLVM IR generation of declaration");
+ Gen->HandleTagDeclDefinition(D);
+ }
+
+ virtual void CompleteTentativeDefinition(VarDecl *D) {
+ Gen->CompleteTentativeDefinition(D);
+ }
+
+ virtual void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) {
+ Gen->HandleVTable(RD, DefinitionRequired);
+ }
+
+ static void InlineAsmDiagHandler(const llvm::SMDiagnostic &SM,void *Context,
+ unsigned LocCookie) {
+ SourceLocation Loc = SourceLocation::getFromRawEncoding(LocCookie);
+ ((BackendConsumer*)Context)->InlineAsmDiagHandler2(SM, Loc);
+ }
+
+ void InlineAsmDiagHandler2(const llvm::SMDiagnostic &,
+ SourceLocation LocCookie);
+ };
+}
+
+/// ConvertBackendLocation - Convert a location in a temporary llvm::SourceMgr
+/// buffer to be a valid FullSourceLoc.
+static FullSourceLoc ConvertBackendLocation(const llvm::SMDiagnostic &D,
+ SourceManager &CSM) {
+ // Get both the clang and llvm source managers. The location is relative to
+  // a memory buffer that the LLVM Source Manager is handling; we need to add
+ // a copy to the Clang source manager.
+ const llvm::SourceMgr &LSM = *D.getSourceMgr();
+
+ // We need to copy the underlying LLVM memory buffer because llvm::SourceMgr
+  // already owns its copy and clang::SourceManager wants to own one too.
+ const MemoryBuffer *LBuf =
+ LSM.getMemoryBuffer(LSM.FindBufferContainingLoc(D.getLoc()));
+
+ // Create the copy and transfer ownership to clang::SourceManager.
+ llvm::MemoryBuffer *CBuf =
+ llvm::MemoryBuffer::getMemBufferCopy(LBuf->getBuffer(),
+ LBuf->getBufferIdentifier());
+ FileID FID = CSM.createFileIDForMemBuffer(CBuf);
+
+ // Translate the offset into the file.
+ unsigned Offset = D.getLoc().getPointer() - LBuf->getBufferStart();
+ SourceLocation NewLoc =
+ CSM.getLocForStartOfFile(FID).getFileLocWithOffset(Offset);
+ return FullSourceLoc(NewLoc, CSM);
+}
+
+
+/// InlineAsmDiagHandler2 - This function is invoked when the backend hits an
+/// error parsing inline asm. The SMDiagnostic indicates the error relative to
+/// the temporary memory buffer that the inline asm parser has set up.
+void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D,
+ SourceLocation LocCookie) {
+ // There are a couple of different kinds of errors we could get here. First,
+ // we re-format the SMDiagnostic in terms of a clang diagnostic.
+
+ // Strip "error: " off the start of the message string.
+ llvm::StringRef Message = D.getMessage();
+ if (Message.startswith("error: "))
+ Message = Message.substr(7);
+
+ // If the SMDiagnostic has an inline asm source location, translate it.
+ FullSourceLoc Loc;
+ if (D.getLoc() != SMLoc())
+ Loc = ConvertBackendLocation(D, Context->getSourceManager());
+
+
+ // If this problem has clang-level source location information, report the
+ // issue as being an error in the source with a note showing the instantiated
+ // code.
+ if (LocCookie.isValid()) {
+ Diags.Report(FullSourceLoc(LocCookie, Context->getSourceManager()),
+ diag::err_fe_inline_asm).AddString(Message);
+
+ if (D.getLoc().isValid())
+ Diags.Report(Loc, diag::note_fe_inline_asm_here);
+ return;
+ }
+
+  // Otherwise, report the backend error as occurring in the generated .s file.
+ // If Loc is invalid, we still need to report the error, it just gets no
+ // location info.
+ Diags.Report(Loc, diag::err_fe_inline_asm).AddString(Message);
+}
+
+//
+
+CodeGenAction::CodeGenAction(unsigned _Act) : Act(_Act) {}
+
+CodeGenAction::~CodeGenAction() {}
+
+bool CodeGenAction::hasIRSupport() const { return true; }
+
+void CodeGenAction::EndSourceFileAction() {
+ // If the consumer creation failed, do nothing.
+ if (!getCompilerInstance().hasASTConsumer())
+ return;
+
+ // Steal the module from the consumer.
+ BackendConsumer *Consumer = static_cast<BackendConsumer*>(
+ &getCompilerInstance().getASTConsumer());
+
+ TheModule.reset(Consumer->takeModule());
+}
+
+llvm::Module *CodeGenAction::takeModule() {
+ return TheModule.take();
+}
+
+static raw_ostream *GetOutputStream(CompilerInstance &CI,
+ llvm::StringRef InFile,
+ BackendAction Action) {
+ switch (Action) {
+ case Backend_EmitAssembly:
+ return CI.createDefaultOutputFile(false, InFile, "s");
+ case Backend_EmitLL:
+ return CI.createDefaultOutputFile(false, InFile, "ll");
+ case Backend_EmitBC:
+ return CI.createDefaultOutputFile(true, InFile, "bc");
+ case Backend_EmitNothing:
+ return 0;
+ case Backend_EmitMCNull:
+ case Backend_EmitObj:
+ return CI.createDefaultOutputFile(true, InFile, "o");
+ }
+
+ assert(0 && "Invalid action!");
+ return 0;
+}
+
+ASTConsumer *CodeGenAction::CreateASTConsumer(CompilerInstance &CI,
+ llvm::StringRef InFile) {
+ BackendAction BA = static_cast<BackendAction>(Act);
+ llvm::OwningPtr<llvm::raw_ostream> OS(GetOutputStream(CI, InFile, BA));
+ if (BA != Backend_EmitNothing && !OS)
+ return 0;
+
+ return new BackendConsumer(BA, CI.getDiagnostics(),
+ CI.getCodeGenOpts(), CI.getTargetOpts(),
+ CI.getFrontendOpts().ShowTimers, InFile, OS.take(),
+ CI.getLLVMContext());
+}
+
+void CodeGenAction::ExecuteAction() {
+ // If this is an IR file, we have to treat it specially.
+ if (getCurrentFileKind() == IK_LLVM_IR) {
+ BackendAction BA = static_cast<BackendAction>(Act);
+ CompilerInstance &CI = getCompilerInstance();
+ raw_ostream *OS = GetOutputStream(CI, getCurrentFile(), BA);
+ if (BA != Backend_EmitNothing && !OS)
+ return;
+
+ bool Invalid;
+ SourceManager &SM = CI.getSourceManager();
+ const llvm::MemoryBuffer *MainFile = SM.getBuffer(SM.getMainFileID(),
+ &Invalid);
+ if (Invalid)
+ return;
+
+ // FIXME: This is stupid, IRReader shouldn't take ownership.
+ llvm::MemoryBuffer *MainFileCopy =
+ llvm::MemoryBuffer::getMemBufferCopy(MainFile->getBuffer(),
+ getCurrentFile().c_str());
+
+ llvm::SMDiagnostic Err;
+ TheModule.reset(ParseIR(MainFileCopy, Err, CI.getLLVMContext()));
+ if (!TheModule) {
+ // Translate from the diagnostic info to the SourceManager location.
+ SourceLocation Loc = SM.getLocation(
+ SM.getFileEntryForID(SM.getMainFileID()), Err.getLineNo(),
+ Err.getColumnNo() + 1);
+
+ // Get a custom diagnostic for the error. We strip off a leading
+ // diagnostic code if there is one.
+ llvm::StringRef Msg = Err.getMessage();
+ if (Msg.startswith("error: "))
+ Msg = Msg.substr(7);
+ unsigned DiagID = CI.getDiagnostics().getCustomDiagID(Diagnostic::Error,
+ Msg);
+
+ CI.getDiagnostics().Report(FullSourceLoc(Loc, SM), DiagID);
+ return;
+ }
+
+ EmitBackendOutput(CI.getDiagnostics(), CI.getCodeGenOpts(),
+ CI.getTargetOpts(), TheModule.get(),
+ BA, OS);
+ return;
+ }
+
+ // Otherwise follow the normal AST path.
+ this->ASTFrontendAction::ExecuteAction();
+}
+
+//
+
+EmitAssemblyAction::EmitAssemblyAction()
+ : CodeGenAction(Backend_EmitAssembly) {}
+
+EmitBCAction::EmitBCAction() : CodeGenAction(Backend_EmitBC) {}
+
+EmitLLVMAction::EmitLLVMAction() : CodeGenAction(Backend_EmitLL) {}
+
+EmitLLVMOnlyAction::EmitLLVMOnlyAction() : CodeGenAction(Backend_EmitNothing) {}
+
+EmitCodeGenOnlyAction::EmitCodeGenOnlyAction() : CodeGenAction(Backend_EmitMCNull) {}
+
+EmitObjAction::EmitObjAction() : CodeGenAction(Backend_EmitObj) {}
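These action classes are the entry points embedding tools use; a typical (hand-written, error handling elided) use compiles the configured input to an llvm::Module in memory:

    // Hand-written sketch; assumes 'CI' is a fully configured
    // clang::CompilerInstance (invocation, diagnostics, target, inputs).
    clang::EmitLLVMOnlyAction Act;
    if (CI.ExecuteAction(Act)) {
      llvm::Module *M = Act.takeModule();  // caller now owns the module
      // ... use or JIT the module ...
    }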
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
index 73de0fd..eb6c436 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -14,13 +14,16 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGDebugInfo.h"
+#include "CGException.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;
@@ -28,13 +31,20 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
: BlockFunction(cgm, *this, Builder), CGM(cgm),
Target(CGM.getContext().Target),
Builder(cgm.getModule().getContext()),
- DebugInfo(0), IndirectBranch(0),
+ ExceptionSlot(0), DebugInfo(0), IndirectBranch(0),
SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0),
+ DidCallStackSave(false), UnreachableBlock(0),
CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
- ConditionalBranchLevel(0), TerminateHandler(0), TrapBB(0),
- UniqueAggrDestructorCount(0) {
- LLVMIntTy = ConvertType(getContext().IntTy);
+ ConditionalBranchLevel(0), TerminateLandingPad(0), TerminateHandler(0),
+ TrapBB(0) {
+
+ // Get some frequently used types.
LLVMPointerWidth = Target.getPointerWidth(0);
+ llvm::LLVMContext &LLVMContext = CGM.getLLVMContext();
+ IntPtrTy = llvm::IntegerType::get(LLVMContext, LLVMPointerWidth);
+ Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
+ Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
+
Exceptions = getContext().getLangOptions().Exceptions;
CatchUndefined = getContext().getLangOptions().CatchUndefined;
CGM.getMangleContext().startNewFunction();
@@ -45,14 +55,6 @@ ASTContext &CodeGenFunction::getContext() const {
}
-llvm::BasicBlock *CodeGenFunction::getBasicBlockForLabel(const LabelStmt *S) {
- llvm::BasicBlock *&BB = LabelMap[S];
- if (BB) return BB;
-
- // Create, but don't insert, the new block.
- return BB = createBasicBlock(S->getName());
-}
-
llvm::Value *CodeGenFunction::GetAddrOfLocalVar(const VarDecl *VD) {
llvm::Value *Res = LocalDeclMap[VD];
assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
@@ -87,25 +89,26 @@ void CodeGenFunction::EmitReturnBlock() {
// We have a valid insert point, reuse it if it is empty or there are no
// explicit jumps to the return block.
- if (CurBB->empty() || ReturnBlock->use_empty()) {
- ReturnBlock->replaceAllUsesWith(CurBB);
- delete ReturnBlock;
+ if (CurBB->empty() || ReturnBlock.Block->use_empty()) {
+ ReturnBlock.Block->replaceAllUsesWith(CurBB);
+ delete ReturnBlock.Block;
} else
- EmitBlock(ReturnBlock);
+ EmitBlock(ReturnBlock.Block);
return;
}
// Otherwise, if the return block is the target of a single direct
// branch then we can just put the code in that block instead. This
// cleans up functions which started with a unified return block.
- if (ReturnBlock->hasOneUse()) {
+ if (ReturnBlock.Block->hasOneUse()) {
llvm::BranchInst *BI =
- dyn_cast<llvm::BranchInst>(*ReturnBlock->use_begin());
- if (BI && BI->isUnconditional() && BI->getSuccessor(0) == ReturnBlock) {
+ dyn_cast<llvm::BranchInst>(*ReturnBlock.Block->use_begin());
+ if (BI && BI->isUnconditional() &&
+ BI->getSuccessor(0) == ReturnBlock.Block) {
// Reset insertion point and delete the branch.
Builder.SetInsertPoint(BI->getParent());
BI->eraseFromParent();
- delete ReturnBlock;
+ delete ReturnBlock.Block;
return;
}
}
@@ -114,29 +117,37 @@ void CodeGenFunction::EmitReturnBlock() {
// unless it has uses. However, we still need a place to put the debug
// region.end for now.
- EmitBlock(ReturnBlock);
+ EmitBlock(ReturnBlock.Block);
+}
+
+static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
+ if (!BB) return;
+ if (!BB->use_empty())
+ return CGF.CurFn->getBasicBlockList().push_back(BB);
+ delete BB;
}
void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
assert(BreakContinueStack.empty() &&
"mismatched push/pop in break/continue stack!");
- assert(BlockScopes.empty() &&
- "did not remove all blocks from block scope map!");
- assert(CleanupEntries.empty() &&
- "mismatched push/pop in cleanup stack!");
// Emit function epilog (to return).
EmitReturnBlock();
+ EmitFunctionInstrumentation("__cyg_profile_func_exit");
+
// Emit debug descriptor for function end.
if (CGDebugInfo *DI = getDebugInfo()) {
DI->setLocation(EndLoc);
DI->EmitRegionEnd(CurFn, Builder);
}
- EmitFunctionEpilog(*CurFnInfo, ReturnValue);
+ EmitFunctionEpilog(*CurFnInfo);
EmitEndEHSpec(CurCodeDecl);
+ assert(EHStack.empty() &&
+ "did not remove all scopes from cleanup stack!");
+
// If someone did an indirect goto, emit the indirect goto block at the end of
// the function.
if (IndirectBranch) {
@@ -158,6 +169,53 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
PN->eraseFromParent();
}
}
+
+ EmitIfUsed(*this, TerminateLandingPad);
+ EmitIfUsed(*this, TerminateHandler);
+ EmitIfUsed(*this, UnreachableBlock);
+
+ if (CGM.getCodeGenOpts().EmitDeclMetadata)
+ EmitDeclMetadata();
+}
+
+/// ShouldInstrumentFunction - Return true if the current function should be
+/// instrumented with __cyg_profile_func_* calls.
+bool CodeGenFunction::ShouldInstrumentFunction() {
+ if (!CGM.getCodeGenOpts().InstrumentFunctions)
+ return false;
+ if (CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
+ return false;
+ return true;
+}
+
+/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
+/// instrumentation function with the current function and the call site, if
+/// function instrumentation is enabled.
+void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
+ if (!ShouldInstrumentFunction())
+ return;
+
+ const llvm::PointerType *PointerTy;
+ const llvm::FunctionType *FunctionTy;
+ std::vector<const llvm::Type*> ProfileFuncArgs;
+
+ // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
+ PointerTy = llvm::Type::getInt8PtrTy(VMContext);
+ ProfileFuncArgs.push_back(PointerTy);
+ ProfileFuncArgs.push_back(PointerTy);
+ FunctionTy = llvm::FunctionType::get(
+ llvm::Type::getVoidTy(VMContext),
+ ProfileFuncArgs, false);
+
+ llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
+ llvm::CallInst *CallSite = Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::returnaddress, 0, 0),
+ llvm::ConstantInt::get(Int32Ty, 0),
+ "callsite");
+
+ Builder.CreateCall2(F,
+ llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
+ CallSite);
}
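At the source level this corresponds to -finstrument-functions; the runtime only needs to supply the two hooks, which must themselves be exempt from instrumentation. The conventional pairing looks like:

    extern "C" {
      __attribute__((no_instrument_function))
      void __cyg_profile_func_enter(void *this_fn, void *call_site) {
        // e.g. append (this_fn, call_site) to a trace buffer
      }
      __attribute__((no_instrument_function))
      void __cyg_profile_func_exit(void *this_fn, void *call_site) {
        // matching exit record
      }
    }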
void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
@@ -187,14 +245,12 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// Create a marker to make it easy to insert allocas into the entryblock
// later. Don't create this with the builder, because we don't want it
// folded.
- llvm::Value *Undef = llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext));
- AllocaInsertPt = new llvm::BitCastInst(Undef,
- llvm::Type::getInt32Ty(VMContext), "",
- EntryBB);
+ llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
+ AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
if (Builder.isNamePreserving())
AllocaInsertPt->setName("allocapt");
- ReturnBlock = createBasicBlock("return");
+ ReturnBlock = getJumpDestInCurrentScope("return");
Builder.SetInsertPoint(EntryBB);
@@ -209,6 +265,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
}
+ EmitFunctionInstrumentation("__cyg_profile_func_enter");
+
// FIXME: Leaked.
// CC info is ignored, hopefully?
CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args,
@@ -513,15 +571,11 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
return;
// FIXME: Handle variable sized types.
- const llvm::Type *IntPtr = llvm::IntegerType::get(VMContext,
- LLVMPointerWidth);
-
- Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtr), DestPtr,
+ Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtrTy), DestPtr,
llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
// TypeInfo.first describes size in bits.
- llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- TypeInfo.second/8),
+ llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8),
+ llvm::ConstantInt::get(Int32Ty, TypeInfo.second/8),
llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
0));
}
@@ -531,7 +585,7 @@ llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelStmt *L) {
if (IndirectBranch == 0)
GetIndirectGotoBlock();
- llvm::BasicBlock *BB = getBasicBlockForLabel(L);
+ llvm::BasicBlock *BB = getJumpDestForLabel(L).Block;
// Make sure the indirect branch includes all of the address-taken blocks.
IndirectBranch->addDestination(BB);
@@ -603,233 +657,574 @@ llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
}
llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
- if (CGM.getContext().getBuiltinVaListType()->isArrayType()) {
+ if (CGM.getContext().getBuiltinVaListType()->isArrayType())
return EmitScalarExpr(E);
- }
return EmitLValue(E).getAddress();
}
-void CodeGenFunction::PushCleanupBlock(llvm::BasicBlock *CleanupEntryBlock,
- llvm::BasicBlock *CleanupExitBlock,
- llvm::BasicBlock *PreviousInvokeDest,
- bool EHOnly) {
- CleanupEntries.push_back(CleanupEntry(CleanupEntryBlock, CleanupExitBlock,
- PreviousInvokeDest, EHOnly));
+/// Pops cleanup blocks until the given savepoint is reached.
+void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
+ assert(Old.isValid());
+
+ EHScopeStack::iterator E = EHStack.find(Old);
+ while (EHStack.begin() != E)
+ PopCleanupBlock();
+}
+
+/// Destroys a cleanup if it was unused.
+static void DestroyCleanup(CodeGenFunction &CGF,
+ llvm::BasicBlock *Entry,
+ llvm::BasicBlock *Exit) {
+ assert(Entry->use_empty() && "destroying cleanup with uses!");
+ assert(Exit->getTerminator() == 0 &&
+ "exit has terminator but entry has no predecessors!");
+
+ // This doesn't always remove the entire cleanup, but it's much
+ // safer as long as we don't know what blocks belong to the cleanup.
+ // A *much* better approach if we care about this inefficiency would
+ // be to lazily emit the cleanup.
+
+ // If the exit block is distinct from the entry, give it a branch to
+ // an unreachable destination. This preserves the well-formedness
+ // of the IR.
+ if (Entry != Exit)
+ llvm::BranchInst::Create(CGF.getUnreachableBlock(), Exit);
+
+ assert(!Entry->getParent() && "cleanup entry already positioned?");
+ // We can't just delete the entry; we have to kill any references to
+ // its instructions in other blocks.
+ for (llvm::BasicBlock::iterator I = Entry->begin(), E = Entry->end();
+ I != E; ++I)
+ if (!I->use_empty())
+ I->replaceAllUsesWith(llvm::UndefValue::get(I->getType()));
+ delete Entry;
}
-void CodeGenFunction::EmitCleanupBlocks(size_t OldCleanupStackSize) {
- assert(CleanupEntries.size() >= OldCleanupStackSize &&
- "Cleanup stack mismatch!");
+/// Creates a switch instruction to thread branches out of the given
+/// block (which is the exit block of a cleanup).
+static void CreateCleanupSwitch(CodeGenFunction &CGF,
+ llvm::BasicBlock *Block) {
+ if (Block->getTerminator()) {
+ assert(isa<llvm::SwitchInst>(Block->getTerminator()) &&
+ "cleanup block already has a terminator, but it isn't a switch");
+ return;
+ }
- while (CleanupEntries.size() > OldCleanupStackSize)
- EmitCleanupBlock();
+ llvm::Value *DestCodePtr
+ = CGF.CreateTempAlloca(CGF.Builder.getInt32Ty(), "cleanup.dst");
+ CGBuilderTy Builder(Block);
+ llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp");
+
+ // Create a switch instruction to determine where to jump next.
+ Builder.CreateSwitch(DestCode, CGF.getUnreachableBlock());
}
-CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock() {
- CleanupEntry &CE = CleanupEntries.back();
+/// Attempts to reduce a cleanup's entry block to a fallthrough. This
+/// is basically llvm::MergeBlockIntoPredecessor, except
+/// simplified/optimized for the tighter constraints on cleanup
+/// blocks.
+static void SimplifyCleanupEntry(CodeGenFunction &CGF,
+ llvm::BasicBlock *Entry) {
+ llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
+ if (!Pred) return;
- llvm::BasicBlock *CleanupEntryBlock = CE.CleanupEntryBlock;
+ llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
+ if (!Br || Br->isConditional()) return;
+ assert(Br->getSuccessor(0) == Entry);
- std::vector<llvm::BasicBlock *> Blocks;
- std::swap(Blocks, CE.Blocks);
+ // If we were previously inserting at the end of the cleanup entry
+ // block, we'll need to continue inserting at the end of the
+ // predecessor.
+ bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
+ assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());
- std::vector<llvm::BranchInst *> BranchFixups;
- std::swap(BranchFixups, CE.BranchFixups);
+ // Kill the branch.
+ Br->eraseFromParent();
- bool EHOnly = CE.EHOnly;
+ // Merge the blocks.
+ Pred->getInstList().splice(Pred->end(), Entry->getInstList());
- setInvokeDest(CE.PreviousInvokeDest);
+ // Kill the entry block.
+ Entry->eraseFromParent();
- CleanupEntries.pop_back();
+ if (WasInsertBlock)
+ CGF.Builder.SetInsertPoint(Pred);
+}
- // Check if any branch fixups pointed to the scope we just popped. If so,
- // we can remove them.
- for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) {
- llvm::BasicBlock *Dest = BranchFixups[i]->getSuccessor(0);
- BlockScopeMap::iterator I = BlockScopes.find(Dest);
+/// Attempts to reduce a cleanup's exit switch to an unconditional
+/// branch.
+static void SimplifyCleanupExit(llvm::BasicBlock *Exit) {
+ llvm::TerminatorInst *Terminator = Exit->getTerminator();
+ assert(Terminator && "completed cleanup exit has no terminator");
- if (I == BlockScopes.end())
- continue;
+ llvm::SwitchInst *Switch = dyn_cast<llvm::SwitchInst>(Terminator);
+ if (!Switch) return;
+ if (Switch->getNumCases() != 2) return; // default + 1
- assert(I->second <= CleanupEntries.size() && "Invalid branch fixup!");
+ llvm::LoadInst *Cond = cast<llvm::LoadInst>(Switch->getCondition());
+ llvm::AllocaInst *CondVar = cast<llvm::AllocaInst>(Cond->getPointerOperand());
- if (I->second == CleanupEntries.size()) {
- // We don't need to do this branch fixup.
- BranchFixups[i] = BranchFixups.back();
- BranchFixups.pop_back();
- i--;
- e--;
- continue;
- }
- }
+ // Replace the switch instruction with an unconditional branch.
+ llvm::BasicBlock *Dest = Switch->getSuccessor(1); // default is 0
+ Switch->eraseFromParent();
+ llvm::BranchInst::Create(Dest, Exit);
- llvm::BasicBlock *SwitchBlock = CE.CleanupExitBlock;
- llvm::BasicBlock *EndBlock = 0;
- if (!BranchFixups.empty()) {
- if (!SwitchBlock)
- SwitchBlock = createBasicBlock("cleanup.switch");
- EndBlock = createBasicBlock("cleanup.end");
+ // Delete all uses of the condition variable.
+ Cond->eraseFromParent();
+ while (!CondVar->use_empty())
+ cast<llvm::StoreInst>(*CondVar->use_begin())->eraseFromParent();
- llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+ // Delete the condition variable itself.
+ CondVar->eraseFromParent();
+}
- Builder.SetInsertPoint(SwitchBlock);
+/// Threads a branch fixup through a cleanup block.
+static void ThreadFixupThroughCleanup(CodeGenFunction &CGF,
+ BranchFixup &Fixup,
+ llvm::BasicBlock *Entry,
+ llvm::BasicBlock *Exit) {
+ if (!Exit->getTerminator())
+ CreateCleanupSwitch(CGF, Exit);
- llvm::Value *DestCodePtr
- = CreateTempAlloca(llvm::Type::getInt32Ty(VMContext),
- "cleanup.dst");
- llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp");
+ // Find the switch and its destination index alloca.
+ llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Exit->getTerminator());
+ llvm::Value *DestCodePtr =
+ cast<llvm::LoadInst>(Switch->getCondition())->getPointerOperand();
- // Create a switch instruction to determine where to jump next.
- llvm::SwitchInst *SI = Builder.CreateSwitch(DestCode, EndBlock,
- BranchFixups.size());
+ // Compute the index of the new case we're adding to the switch.
+ unsigned Index = Switch->getNumCases();
- // Restore the current basic block (if any)
- if (CurBB) {
- Builder.SetInsertPoint(CurBB);
+ const llvm::IntegerType *i32 = llvm::Type::getInt32Ty(CGF.getLLVMContext());
+ llvm::ConstantInt *IndexV = llvm::ConstantInt::get(i32, Index);
- // If we had a current basic block, we also need to emit an instruction
- // to initialize the cleanup destination.
- Builder.CreateStore(llvm::Constant::getNullValue(llvm::Type::getInt32Ty(VMContext)),
- DestCodePtr);
- } else
- Builder.ClearInsertionPoint();
-
- for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) {
- llvm::BranchInst *BI = BranchFixups[i];
- llvm::BasicBlock *Dest = BI->getSuccessor(0);
-
- // Fixup the branch instruction to point to the cleanup block.
- BI->setSuccessor(0, CleanupEntryBlock);
-
- if (CleanupEntries.empty()) {
- llvm::ConstantInt *ID;
-
- // Check if we already have a destination for this block.
- if (Dest == SI->getDefaultDest())
- ID = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
- else {
- ID = SI->findCaseDest(Dest);
- if (!ID) {
- // No code found, get a new unique one by using the number of
- // switch successors.
- ID = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- SI->getNumSuccessors());
- SI->addCase(ID, Dest);
- }
- }
+ // Set the index in the origin block.
+ new llvm::StoreInst(IndexV, DestCodePtr, Fixup.Origin);
- // Store the jump destination before the branch instruction.
- new llvm::StoreInst(ID, DestCodePtr, BI);
- } else {
- // We need to jump through another cleanup block. Create a pad block
- // with a branch instruction that jumps to the final destination and add
- // it as a branch fixup to the current cleanup scope.
+ // Add a case to the switch.
+ Switch->addCase(IndexV, Fixup.Destination);
- // Create the pad block.
- llvm::BasicBlock *CleanupPad = createBasicBlock("cleanup.pad", CurFn);
+ // Change the last branch to point to the cleanup entry block.
+ Fixup.LatestBranch->setSuccessor(Fixup.LatestBranchIndex, Entry);
- // Create a unique case ID.
- llvm::ConstantInt *ID
- = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- SI->getNumSuccessors());
+ // And finally, update the fixup.
+ Fixup.LatestBranch = Switch;
+ Fixup.LatestBranchIndex = Index;
+}
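
As a concrete (hypothetical) illustration of what produces the fixups threaded above: a goto whose label has not been emitted yet is initially a plain branch, and when the destructor cleanup for 'a' is popped, ThreadFixupThroughCleanup redirects that branch through the cleanup.

  struct A { ~A(); };  // non-trivial destructor => normal cleanup scope

  void f(bool b) {
    {
      A a;
      if (b)
        goto out;  // recorded as a BranchFixup; a.~A() must run first
    }
  out:
    return;
  }
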
- // Store the jump destination before the branch instruction.
- new llvm::StoreInst(ID, DestCodePtr, BI);
+/// Try to simplify both the entry and exit edges of a cleanup.
+static void SimplifyCleanupEdges(CodeGenFunction &CGF,
+ llvm::BasicBlock *Entry,
+ llvm::BasicBlock *Exit) {
- // Add it as the destination.
- SI->addCase(ID, CleanupPad);
+ // Given their current implementations, it's important to run these
+ // in this order: SimplifyCleanupEntry will delete Entry if it can
+ // be merged into its predecessor, which will then break
+ // SimplifyCleanupExit if (as is common) Entry == Exit.
- // Create the branch to the final destination.
- llvm::BranchInst *BI = llvm::BranchInst::Create(Dest);
- CleanupPad->getInstList().push_back(BI);
+ SimplifyCleanupExit(Exit);
+ SimplifyCleanupEntry(CGF, Entry);
+}
- // And add it as a branch fixup.
- CleanupEntries.back().BranchFixups.push_back(BI);
- }
- }
+static void EmitLazyCleanup(CodeGenFunction &CGF,
+ EHScopeStack::LazyCleanup *Fn,
+ bool ForEH) {
+ if (ForEH) CGF.EHStack.pushTerminate();
+ Fn->Emit(CGF, ForEH);
+ if (ForEH) CGF.EHStack.popTerminate();
+ assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
+}
+
+static void SplitAndEmitLazyCleanup(CodeGenFunction &CGF,
+ EHScopeStack::LazyCleanup *Fn,
+ bool ForEH,
+ llvm::BasicBlock *Entry) {
+ assert(Entry && "no entry block for cleanup");
+
+ // Remove the switch and load from the end of the entry block.
+ llvm::Instruction *Switch = &Entry->getInstList().back();
+ Entry->getInstList().remove(Switch);
+ assert(isa<llvm::SwitchInst>(Switch));
+ llvm::Instruction *Load = &Entry->getInstList().back();
+ Entry->getInstList().remove(Load);
+ assert(isa<llvm::LoadInst>(Load));
+
+ assert(Entry->getInstList().empty() &&
+ "lazy cleanup block not empty after removing load/switch pair?");
+
+ // Emit the actual cleanup at the end of the entry block.
+ CGF.Builder.SetInsertPoint(Entry);
+ EmitLazyCleanup(CGF, Fn, ForEH);
+
+ // Put the load and switch at the end of the exit block.
+ llvm::BasicBlock *Exit = CGF.Builder.GetInsertBlock();
+ Exit->getInstList().push_back(Load);
+ Exit->getInstList().push_back(Switch);
+
+ // Clean up the edges if possible.
+ SimplifyCleanupEdges(CGF, Entry, Exit);
+
+ CGF.Builder.ClearInsertionPoint();
+}
+
+static void PopLazyCleanupBlock(CodeGenFunction &CGF) {
+ assert(isa<EHLazyCleanupScope>(*CGF.EHStack.begin()) && "top not a cleanup!");
+ EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*CGF.EHStack.begin());
+ assert(Scope.getFixupDepth() <= CGF.EHStack.getNumBranchFixups());
+
+ // Check whether we need an EH cleanup. This is only true if we've
+ // generated a lazy EH cleanup block.
+ llvm::BasicBlock *EHEntry = Scope.getEHBlock();
+ bool RequiresEHCleanup = (EHEntry != 0);
+
+ // Check the three conditions which might require a normal cleanup:
+
+ // - whether there are branch fix-ups through this cleanup
+ unsigned FixupDepth = Scope.getFixupDepth();
+ bool HasFixups = CGF.EHStack.getNumBranchFixups() != FixupDepth;
+
+ // - whether control has already been threaded through this cleanup
+ llvm::BasicBlock *NormalEntry = Scope.getNormalBlock();
+ bool HasExistingBranches = (NormalEntry != 0);
+
+ // - whether there's a fallthrough
+ llvm::BasicBlock *FallthroughSource = CGF.Builder.GetInsertBlock();
+ bool HasFallthrough = (FallthroughSource != 0);
+
+ bool RequiresNormalCleanup = false;
+ if (Scope.isNormalCleanup() &&
+ (HasFixups || HasExistingBranches || HasFallthrough)) {
+ RequiresNormalCleanup = true;
}
- // Remove all blocks from the block scope map.
- for (size_t i = 0, e = Blocks.size(); i != e; ++i) {
- assert(BlockScopes.count(Blocks[i]) &&
- "Did not find block in scope map!");
+ // If we don't need the cleanup at all, we're done.
+ if (!RequiresNormalCleanup && !RequiresEHCleanup) {
+ CGF.EHStack.popCleanup();
+ assert(CGF.EHStack.getNumBranchFixups() == 0 ||
+ CGF.EHStack.hasNormalCleanups());
+ return;
+ }
+
+ // Copy the cleanup emission data out. Note that SmallVector
+ // guarantees maximal alignment for its buffer regardless of its
+ // type parameter.
+ llvm::SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
+ CleanupBuffer.reserve(Scope.getCleanupSize());
+ memcpy(CleanupBuffer.data(),
+ Scope.getCleanupBuffer(), Scope.getCleanupSize());
+ CleanupBuffer.set_size(Scope.getCleanupSize());
+ EHScopeStack::LazyCleanup *Fn =
+ reinterpret_cast<EHScopeStack::LazyCleanup*>(CleanupBuffer.data());
+
+ // We're done with the scope; pop it off so we can emit the cleanups.
+ CGF.EHStack.popCleanup();
+
+ if (RequiresNormalCleanup) {
+ // If we have a fallthrough and no other need for the cleanup,
+ // emit it directly.
+ if (HasFallthrough && !HasFixups && !HasExistingBranches) {
+ EmitLazyCleanup(CGF, Fn, /*ForEH*/ false);
+
+ // Otherwise, the best approach is to thread everything through
+ // the cleanup block and then try to clean up after ourselves.
+ } else {
+ // Force the entry block to exist.
+ if (!HasExistingBranches) {
+ NormalEntry = CGF.createBasicBlock("cleanup");
+ CreateCleanupSwitch(CGF, NormalEntry);
+ }
- BlockScopes.erase(Blocks[i]);
+ CGF.EmitBlock(NormalEntry);
+
+ // Thread the fallthrough edge through the (momentarily trivial)
+ // cleanup.
+ llvm::BasicBlock *FallthroughDestination = 0;
+ if (HasFallthrough) {
+ assert(isa<llvm::BranchInst>(FallthroughSource->getTerminator()));
+ FallthroughDestination = CGF.createBasicBlock("cleanup.cont");
+
+ BranchFixup Fix;
+ Fix.Destination = FallthroughDestination;
+ Fix.LatestBranch = FallthroughSource->getTerminator();
+ Fix.LatestBranchIndex = 0;
+ Fix.Origin = Fix.LatestBranch;
+
+ // Restore fixup invariant. EmitBlock added a branch to the
+ // cleanup which we need to redirect to the destination.
+ cast<llvm::BranchInst>(Fix.LatestBranch)
+ ->setSuccessor(0, Fix.Destination);
+
+ ThreadFixupThroughCleanup(CGF, Fix, NormalEntry, NormalEntry);
+ }
+
+ // Thread any "real" fixups we need to thread.
+ for (unsigned I = FixupDepth, E = CGF.EHStack.getNumBranchFixups();
+ I != E; ++I)
+ if (CGF.EHStack.getBranchFixup(I).Destination)
+ ThreadFixupThroughCleanup(CGF, CGF.EHStack.getBranchFixup(I),
+ NormalEntry, NormalEntry);
+
+ SplitAndEmitLazyCleanup(CGF, Fn, /*ForEH*/ false, NormalEntry);
+
+ if (HasFallthrough)
+ CGF.EmitBlock(FallthroughDestination);
+ }
}
- return CleanupBlockInfo(CleanupEntryBlock, SwitchBlock, EndBlock, EHOnly);
+ // Emit the EH cleanup if required.
+ if (RequiresEHCleanup) {
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
+ CGF.EmitBlock(EHEntry);
+ SplitAndEmitLazyCleanup(CGF, Fn, /*ForEH*/ true, EHEntry);
+ CGF.Builder.restoreIP(SavedIP);
+ }
}
-void CodeGenFunction::EmitCleanupBlock() {
- CleanupBlockInfo Info = PopCleanupBlock();
+/// Pops a cleanup block. If the block includes a normal cleanup, the
+/// current insertion point is threaded through the cleanup, as are
+/// any branch fixups on the cleanup.
+void CodeGenFunction::PopCleanupBlock() {
+ assert(!EHStack.empty() && "cleanup stack is empty!");
+ if (isa<EHLazyCleanupScope>(*EHStack.begin()))
+ return PopLazyCleanupBlock(*this);
+
+ assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
+ assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());
+
+ // Handle the EH cleanup if (1) there is one and (2) it's different
+ // from the normal cleanup.
+ if (Scope.isEHCleanup() &&
+ Scope.getEHEntry() != Scope.getNormalEntry()) {
+ llvm::BasicBlock *EHEntry = Scope.getEHEntry();
+ llvm::BasicBlock *EHExit = Scope.getEHExit();
+
+ if (EHEntry->use_empty()) {
+ DestroyCleanup(*this, EHEntry, EHExit);
+ } else {
+ // TODO: this isn't really the ideal location to put this EH
+ // cleanup, but lazy emission is a better solution than trying
+ // to pick a better spot.
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+ EmitBlock(EHEntry);
+ Builder.restoreIP(SavedIP);
+
+ SimplifyCleanupEdges(*this, EHEntry, EHExit);
+ }
+ }
+
+ // If we only have an EH cleanup, we don't really need to do much
+ // here. Branch fixups just naturally drop down to the enclosing
+ // cleanup scope.
+ if (!Scope.isNormalCleanup()) {
+ EHStack.popCleanup();
+ assert(EHStack.getNumBranchFixups() == 0 || EHStack.hasNormalCleanups());
+ return;
+ }
- if (Info.EHOnly) {
- // FIXME: Add this to the exceptional edge
- if (Info.CleanupBlock->getNumUses() == 0)
- delete Info.CleanupBlock;
+ // Check whether the scope has any fixups that need to be threaded.
+ unsigned FixupDepth = Scope.getFixupDepth();
+ bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;
+
+ // Grab the entry and exit blocks.
+ llvm::BasicBlock *Entry = Scope.getNormalEntry();
+ llvm::BasicBlock *Exit = Scope.getNormalExit();
+
+ // Check whether anything's been threaded through the cleanup already.
+ assert((Exit->getTerminator() == 0) == Entry->use_empty() &&
+ "cleanup entry/exit mismatch");
+ bool HasExistingBranches = !Entry->use_empty();
+
+ // Check whether we need to emit a "fallthrough" branch through the
+ // cleanup for the current insertion point.
+ llvm::BasicBlock *FallThrough = Builder.GetInsertBlock();
+ if (FallThrough && FallThrough->getTerminator())
+ FallThrough = 0;
+
+ // If *nothing* is using the cleanup, kill it.
+ if (!FallThrough && !HasFixups && !HasExistingBranches) {
+ EHStack.popCleanup();
+ DestroyCleanup(*this, Entry, Exit);
return;
}
- // Scrub debug location info.
- for (llvm::BasicBlock::iterator LBI = Info.CleanupBlock->begin(),
- LBE = Info.CleanupBlock->end(); LBI != LBE; ++LBI)
- Builder.SetInstDebugLocation(LBI);
+ // Otherwise, add the block to the function.
+ EmitBlock(Entry);
- llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
- if (CurBB && !CurBB->getTerminator() &&
- Info.CleanupBlock->getNumUses() == 0) {
- CurBB->getInstList().splice(CurBB->end(), Info.CleanupBlock->getInstList());
- delete Info.CleanupBlock;
- } else
- EmitBlock(Info.CleanupBlock);
+ if (FallThrough)
+ Builder.SetInsertPoint(Exit);
+ else
+ Builder.ClearInsertionPoint();
- if (Info.SwitchBlock)
- EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- EmitBlock(Info.EndBlock);
-}
+ // Fast case: if we don't have to add any fixups, and either
+ // we don't have a fallthrough or the cleanup wasn't previously
+ // used, then the setup above is sufficient.
+ if (!HasFixups) {
+ if (!FallThrough) {
+ assert(HasExistingBranches && "no reason for cleanup but didn't kill before");
+ EHStack.popCleanup();
+ SimplifyCleanupEdges(*this, Entry, Exit);
+ return;
+ } else if (!HasExistingBranches) {
+ assert(FallThrough && "no reason for cleanup but didn't kill before");
+ // We can't simplify the exit edge in this case because we're
+ // already inserting at the end of the exit block.
+ EHStack.popCleanup();
+ SimplifyCleanupEntry(*this, Entry);
+ return;
+ }
+ }
+
+ // Otherwise we're going to have to thread things through the cleanup.
+ llvm::SmallVector<BranchFixup*, 8> Fixups;
-void CodeGenFunction::AddBranchFixup(llvm::BranchInst *BI) {
- assert(!CleanupEntries.empty() &&
- "Trying to add branch fixup without cleanup block!");
+ // Synthesize a fixup for the current insertion point.
+ BranchFixup Cur;
+ if (FallThrough) {
+ Cur.Destination = createBasicBlock("cleanup.cont");
+ Cur.LatestBranch = FallThrough->getTerminator();
+ Cur.LatestBranchIndex = 0;
+ Cur.Origin = Cur.LatestBranch;
- // FIXME: We could be more clever here and check if there's already a branch
- // fixup for this destination and recycle it.
- CleanupEntries.back().BranchFixups.push_back(BI);
+ // Restore fixup invariant. EmitBlock added a branch to the cleanup
+ // which we need to redirect to the destination.
+ cast<llvm::BranchInst>(Cur.LatestBranch)->setSuccessor(0, Cur.Destination);
+
+ Fixups.push_back(&Cur);
+ } else {
+ Cur.Destination = 0;
+ }
+
+ // Collect any "real" fixups we need to thread.
+ for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
+ I != E; ++I)
+ if (EHStack.getBranchFixup(I).Destination)
+ Fixups.push_back(&EHStack.getBranchFixup(I));
+
+ assert(!Fixups.empty() && "no fixups, invariants broken!");
+
+ // If there's only a single fixup to thread through, do so with
+ // unconditional branches. This only happens if there's a single
+ // branch and no fallthrough.
+ if (Fixups.size() == 1 && !HasExistingBranches) {
+ Fixups[0]->LatestBranch->setSuccessor(Fixups[0]->LatestBranchIndex, Entry);
+ llvm::BranchInst *Br =
+ llvm::BranchInst::Create(Fixups[0]->Destination, Exit);
+ Fixups[0]->LatestBranch = Br;
+ Fixups[0]->LatestBranchIndex = 0;
+
+ // Otherwise, force a switch statement and thread everything through
+ // the switch.
+ } else {
+ CreateCleanupSwitch(*this, Exit);
+ for (unsigned I = 0, E = Fixups.size(); I != E; ++I)
+ ThreadFixupThroughCleanup(*this, *Fixups[I], Entry, Exit);
+ }
+
+ // Emit the fallthrough destination block if necessary.
+ if (Cur.Destination)
+ EmitBlock(Cur.Destination);
+
+ // We're finally done with the cleanup.
+ EHStack.popCleanup();
}
-void CodeGenFunction::EmitBranchThroughCleanup(llvm::BasicBlock *Dest) {
+void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
if (!HaveInsertPoint())
return;
- llvm::BranchInst* BI = Builder.CreateBr(Dest);
-
- Builder.ClearInsertionPoint();
+ // Create the branch.
+ llvm::BranchInst *BI = Builder.CreateBr(Dest.Block);
- // The stack is empty, no need to do any cleanup.
- if (CleanupEntries.empty())
+ // If we're not in a cleanup scope, we don't need to worry about
+ // fixups.
+ if (!EHStack.hasNormalCleanups()) {
+ Builder.ClearInsertionPoint();
return;
+ }
- if (!Dest->getParent()) {
- // We are trying to branch to a block that hasn't been inserted yet.
- AddBranchFixup(BI);
+ // Initialize a fixup.
+ BranchFixup Fixup;
+ Fixup.Destination = Dest.Block;
+ Fixup.Origin = BI;
+ Fixup.LatestBranch = BI;
+ Fixup.LatestBranchIndex = 0;
+
+ // If we can't resolve the destination cleanup scope, just add this
+ // to the current cleanup scope.
+ if (!Dest.ScopeDepth.isValid()) {
+ EHStack.addBranchFixup() = Fixup;
+ Builder.ClearInsertionPoint();
return;
}
- BlockScopeMap::iterator I = BlockScopes.find(Dest);
- if (I == BlockScopes.end()) {
- // We are trying to jump to a block that is outside of any cleanup scope.
- AddBranchFixup(BI);
- return;
+ for (EHScopeStack::iterator I = EHStack.begin(),
+ E = EHStack.find(Dest.ScopeDepth); I != E; ++I) {
+ if (isa<EHCleanupScope>(*I)) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*I);
+ if (Scope.isNormalCleanup())
+ ThreadFixupThroughCleanup(*this, Fixup, Scope.getNormalEntry(),
+ Scope.getNormalExit());
+ } else if (isa<EHLazyCleanupScope>(*I)) {
+ EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*I);
+ if (Scope.isNormalCleanup()) {
+ llvm::BasicBlock *Block = Scope.getNormalBlock();
+ if (!Block) {
+ Block = createBasicBlock("cleanup");
+ Scope.setNormalBlock(Block);
+ }
+ ThreadFixupThroughCleanup(*this, Fixup, Block, Block);
+ }
+ }
}
+
+ Builder.ClearInsertionPoint();
+}
- assert(I->second < CleanupEntries.size() &&
- "Trying to branch into cleanup region");
+void CodeGenFunction::EmitBranchThroughEHCleanup(JumpDest Dest) {
+ if (!HaveInsertPoint())
+ return;
+
+ // Create the branch.
+ llvm::BranchInst *BI = Builder.CreateBr(Dest.Block);
- if (I->second == CleanupEntries.size() - 1) {
- // We have a branch to a block in the same scope.
+ // If we're not in a cleanup scope, we don't need to worry about
+ // fixups.
+ if (!EHStack.hasEHCleanups()) {
+ Builder.ClearInsertionPoint();
return;
}
- AddBranchFixup(BI);
+ // Initialize a fixup.
+ BranchFixup Fixup;
+ Fixup.Destination = Dest.Block;
+ Fixup.Origin = BI;
+ Fixup.LatestBranch = BI;
+ Fixup.LatestBranchIndex = 0;
+
+ // We should never get invalid scope depths for these: invalid scope
+ // depths only arise for as-yet-unemitted labels, and we can't do an
+ // EH-unwind to one of those.
+ assert(Dest.ScopeDepth.isValid() && "invalid scope depth on EH dest?");
+
+ for (EHScopeStack::iterator I = EHStack.begin(),
+ E = EHStack.find(Dest.ScopeDepth); I != E; ++I) {
+ if (isa<EHCleanupScope>(*I)) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*I);
+ if (Scope.isEHCleanup())
+ ThreadFixupThroughCleanup(*this, Fixup, Scope.getEHEntry(),
+ Scope.getEHExit());
+ } else if (isa<EHLazyCleanupScope>(*I)) {
+ EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*I);
+ if (Scope.isEHCleanup()) {
+ llvm::BasicBlock *Block = Scope.getEHBlock();
+ if (!Block) {
+ Block = createBasicBlock("eh.cleanup");
+ Scope.setEHBlock(Block);
+ }
+ ThreadFixupThroughCleanup(*this, Fixup, Block, Block);
+ }
+ }
+ }
+
+ Builder.ClearInsertionPoint();
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
index ece275e..5ee3db0 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
@@ -37,6 +37,7 @@ namespace llvm {
class SwitchInst;
class Twine;
class Value;
+ class CallSite;
}
namespace clang {
@@ -69,12 +70,317 @@ namespace CodeGen {
class CGRecordLayout;
class CGBlockInfo;
+/// A branch fixup. These are required when emitting a goto to a
+/// label which hasn't been emitted yet. The goto is optimistically
+/// emitted as a branch to the basic block for the label, and (if it
+/// occurs in a scope with non-trivial cleanups) a fixup is added to
+/// the innermost cleanup. When a (normal) cleanup is popped, any
+/// unresolved fixups in that scope are threaded through the cleanup.
+struct BranchFixup {
+ /// The origin of the branch. Any switch-index stores required by
+ /// cleanup threading are added before this instruction.
+ llvm::Instruction *Origin;
+
+ /// The destination of the branch.
+ ///
+ /// This can be set to null to indicate that this fixup was
+ /// successfully resolved.
+ llvm::BasicBlock *Destination;
+
+ /// The last branch of the fixup. It is an invariant that
+ /// LatestBranch->getSuccessor(LatestBranchIndex) == Destination.
+ ///
+ /// The branch is always either a BranchInst or a SwitchInst.
+ llvm::TerminatorInst *LatestBranch;
+ unsigned LatestBranchIndex;
+};
+
+enum CleanupKind { NormalAndEHCleanup, EHCleanup, NormalCleanup };
+
+/// A stack of scopes which respond to exceptions, including cleanups
+/// and catch blocks.
+class EHScopeStack {
+public:
+ /// A saved depth on the scope stack. This is necessary because
+ /// pushing scopes onto the stack invalidates iterators.
+ class stable_iterator {
+ friend class EHScopeStack;
+
+ /// Offset from StartOfData to EndOfBuffer.
+ ptrdiff_t Size;
+
+ stable_iterator(ptrdiff_t Size) : Size(Size) {}
+
+ public:
+ static stable_iterator invalid() { return stable_iterator(-1); }
+ stable_iterator() : Size(-1) {}
+
+ bool isValid() const { return Size >= 0; }
+
+ friend bool operator==(stable_iterator A, stable_iterator B) {
+ return A.Size == B.Size;
+ }
+ friend bool operator!=(stable_iterator A, stable_iterator B) {
+ return A.Size != B.Size;
+ }
+ };
+
+ /// A lazy cleanup. Subclasses must be POD-like: cleanups will
+ /// not be destructed, and they will be allocated on the cleanup
+ /// stack and freely copied and moved around.
+ ///
+ /// LazyCleanup implementations should generally be declared in an
+ /// anonymous namespace.
+ class LazyCleanup {
+ public:
+ // Anchor the construction vtable. We use the destructor because
+ // gcc gives an obnoxious warning if there are virtual methods
+ // with an accessible non-virtual destructor. Unfortunately,
+ // declaring this destructor makes it non-trivial, but there
+ // doesn't seem to be any other way around this warning.
+ //
+ // This destructor will never be called.
+ virtual ~LazyCleanup();
+
+ /// Emit the cleanup. For normal cleanups, this is run in the
+ /// same EH context as when the cleanup was pushed, i.e. the
+ /// immediately-enclosing context of the cleanup scope. For
+ /// EH cleanups, this is run in a terminate context.
+ ///
+ /// \param IsForEHCleanup true if this is for an EH cleanup, false
+ /// if for a normal cleanup.
+ virtual void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) = 0;
+ };
+
+private:
+ // The implementation for this class is in CGException.h and
+ // CGException.cpp; the definition is here because it's used as a
+ // member of CodeGenFunction.
+
+ /// The start of the scope-stack buffer, i.e. the allocated pointer
+ /// for the buffer. All of these pointers are either simultaneously
+ /// null or simultaneously valid.
+ char *StartOfBuffer;
+
+ /// The end of the buffer.
+ char *EndOfBuffer;
+
+ /// The first valid entry in the buffer.
+ char *StartOfData;
+
+ /// The innermost normal cleanup on the stack.
+ stable_iterator InnermostNormalCleanup;
+
+ /// The innermost EH cleanup on the stack.
+ stable_iterator InnermostEHCleanup;
+
+ /// The number of catches on the stack.
+ unsigned CatchDepth;
+
+ /// The current set of branch fixups. A branch fixup is a jump to
+ /// an as-yet unemitted label, i.e. a label for which we don't yet
+ /// know the EH stack depth. Whenever we pop a cleanup, we have
+ /// to thread all the current branch fixups through it.
+ ///
+ /// Fixups are recorded as the Use of the respective branch or
+ /// switch statement. The use points to the final destination.
+ /// When popping out of a cleanup, these uses are threaded through
+ /// the cleanup and adjusted to point to the new cleanup.
+ ///
+ /// Note that branches are allowed to jump into protected scopes
+ /// in certain situations; e.g. the following code is legal:
+ /// struct A { ~A(); }; // trivial ctor, non-trivial dtor
+ /// goto foo;
+ /// A a;
+ /// foo:
+ /// bar();
+ llvm::SmallVector<BranchFixup, 8> BranchFixups;
+
+ char *allocate(size_t Size);
+
+ void popNullFixups();
+
+ void *pushLazyCleanup(CleanupKind K, size_t DataSize);
+
+public:
+ EHScopeStack() : StartOfBuffer(0), EndOfBuffer(0), StartOfData(0),
+ InnermostNormalCleanup(stable_end()),
+ InnermostEHCleanup(stable_end()),
+ CatchDepth(0) {}
+ ~EHScopeStack() { delete[] StartOfBuffer; }
+
+ // Variadic templates would make this not terrible.
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T>
+ void pushLazyCleanup(CleanupKind Kind) {
+ void *Buffer = pushLazyCleanup(Kind, sizeof(T));
+ LazyCleanup *Obj = new(Buffer) T();
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0>
+ void pushLazyCleanup(CleanupKind Kind, A0 a0) {
+ void *Buffer = pushLazyCleanup(Kind, sizeof(T));
+ LazyCleanup *Obj = new(Buffer) T(a0);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1>
+ void pushLazyCleanup(CleanupKind Kind, A0 a0, A1 a1) {
+ void *Buffer = pushLazyCleanup(Kind, sizeof(T));
+ LazyCleanup *Obj = new(Buffer) T(a0, a1);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1, class A2>
+ void pushLazyCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2) {
+ void *Buffer = pushLazyCleanup(Kind, sizeof(T));
+ LazyCleanup *Obj = new(Buffer) T(a0, a1, a2);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1, class A2, class A3>
+ void pushLazyCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) {
+ void *Buffer = pushLazyCleanup(Kind, sizeof(T));
+ LazyCleanup *Obj = new(Buffer) T(a0, a1, a2, a3);
+ (void) Obj;
+ }
+
+ /// Push a cleanup on the stack.
+ void pushCleanup(llvm::BasicBlock *NormalEntry,
+ llvm::BasicBlock *NormalExit,
+ llvm::BasicBlock *EHEntry,
+ llvm::BasicBlock *EHExit);
+
+ /// Pops a cleanup scope off the stack. This should only be called
+ /// by CodeGenFunction::PopCleanupBlock.
+ void popCleanup();
+
+ /// Push a set of catch handlers on the stack. The catch is
+ /// uninitialized and will need to have the given number of handlers
+ /// set on it.
+ class EHCatchScope *pushCatch(unsigned NumHandlers);
+
+ /// Pops a catch scope off the stack.
+ void popCatch();
+
+ /// Push an exceptions filter on the stack.
+ class EHFilterScope *pushFilter(unsigned NumFilters);
+
+ /// Pops an exceptions filter off the stack.
+ void popFilter();
+
+ /// Push a terminate handler on the stack.
+ void pushTerminate();
+
+ /// Pops a terminate handler off the stack.
+ void popTerminate();
+
+ /// Determines whether the exception-scopes stack is empty.
+ bool empty() const { return StartOfData == EndOfBuffer; }
+
+ bool requiresLandingPad() const {
+ return (CatchDepth || hasEHCleanups());
+ }
+
+ /// Determines whether there are any normal cleanups on the stack.
+ bool hasNormalCleanups() const {
+ return InnermostNormalCleanup != stable_end();
+ }
+
+ /// Returns the innermost normal cleanup on the stack, or
+ /// stable_end() if there are no normal cleanups.
+ stable_iterator getInnermostNormalCleanup() const {
+ return InnermostNormalCleanup;
+ }
+
+ /// Determines whether there are any EH cleanups on the stack.
+ bool hasEHCleanups() const {
+ return InnermostEHCleanup != stable_end();
+ }
+
+ /// Returns the innermost EH cleanup on the stack, or stable_end()
+ /// if there are no EH cleanups.
+ stable_iterator getInnermostEHCleanup() const {
+ return InnermostEHCleanup;
+ }
+
+ /// An unstable reference to a scope-stack depth. Invalidated by
+ /// pushes but not pops.
+ class iterator;
+
+ /// Returns an iterator pointing to the innermost EH scope.
+ iterator begin() const;
+
+ /// Returns an iterator pointing to the outermost EH scope.
+ iterator end() const;
+
+ /// Create a stable reference to the top of the EH stack. The
+ /// returned reference is valid until that scope is popped off the
+ /// stack.
+ stable_iterator stable_begin() const {
+ return stable_iterator(EndOfBuffer - StartOfData);
+ }
+
+ /// Create a stable reference to the bottom of the EH stack.
+ static stable_iterator stable_end() {
+ return stable_iterator(0);
+ }
+
+ /// Translates an iterator into a stable_iterator.
+ stable_iterator stabilize(iterator it) const;
+
+ /// Finds the nearest cleanup enclosing the given iterator.
+ /// Returns stable_iterator::invalid() if there are no such cleanups.
+ stable_iterator getEnclosingEHCleanup(iterator it) const;
+
+ /// Turn a stable reference to a scope depth into an unstable pointer
+ /// to the EH stack.
+ iterator find(stable_iterator save) const;
+
+ /// Removes the cleanup pointed to by the given stable_iterator.
+ void removeCleanup(stable_iterator save);
+
+ /// Add a branch fixup to the current cleanup scope.
+ BranchFixup &addBranchFixup() {
+ assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
+ BranchFixups.push_back(BranchFixup());
+ return BranchFixups.back();
+ }
+
+ unsigned getNumBranchFixups() const { return BranchFixups.size(); }
+ BranchFixup &getBranchFixup(unsigned I) {
+ assert(I < getNumBranchFixups());
+ return BranchFixups[I];
+ }
+
+ /// Mark any branch fixups leading to the given block as resolved.
+ void resolveBranchFixups(llvm::BasicBlock *Dest);
+};
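
As a sketch of the intended client pattern (CallObjDtor is hypothetical; the POD-like constraints and the anonymous-namespace convention come from the LazyCleanup comments above):

  namespace {
    // POD-like per the LazyCleanup contract: the destructor never runs,
    // and the object may be copied around on the cleanup stack.
    struct CallObjDtor : EHScopeStack::LazyCleanup {
      llvm::Value *Addr;
      const CXXDestructorDecl *Dtor;
      CallObjDtor(llvm::Value *Addr, const CXXDestructorDecl *Dtor)
        : Addr(Addr), Dtor(Dtor) {}

      virtual void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
        CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                  /*ForVirtualBase=*/false, Addr);
      }
    };
  }

  // At the point the object becomes live:
  //   CGF.EHStack.pushLazyCleanup<CallObjDtor>(NormalAndEHCleanup, Addr, Dtor);
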
+
/// CodeGenFunction - This class organizes the per-function state that is used
/// while generating LLVM code.
class CodeGenFunction : public BlockFunction {
CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT
void operator=(const CodeGenFunction&); // DO NOT IMPLEMENT
public:
+ /// A jump destination is a pair of a basic block and a cleanup
+ /// depth. They are used to implement direct jumps across cleanup
+ /// scopes, e.g. goto, break, continue, and return.
+ struct JumpDest {
+ JumpDest() : Block(0), ScopeDepth() {}
+ JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth)
+ : Block(Block), ScopeDepth(Depth) {}
+
+ llvm::BasicBlock *Block;
+ EHScopeStack::stable_iterator ScopeDepth;
+ };
+
CodeGenModule &CGM; // Per-module state.
const TargetInfo &Target;
@@ -94,7 +400,8 @@ public:
GlobalDecl CurGD;
/// ReturnBlock - Unified return block.
- llvm::BasicBlock *ReturnBlock;
+ JumpDest ReturnBlock;
+
/// ReturnValue - The temporary alloca to hold the return value. This is null
/// iff the function has no return value.
llvm::Value *ReturnValue;
@@ -103,7 +410,8 @@ public:
/// we prefer to insert allocas.
llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
- const llvm::Type *LLVMIntTy;
+ // intptr_t, i32, i64
+ const llvm::IntegerType *IntPtrTy, *Int32Ty, *Int64Ty;
uint32_t LLVMPointerWidth;
bool Exceptions;
@@ -112,141 +420,97 @@ public:
/// \brief A mapping from NRVO variables to the flags used to indicate
/// when the NRVO has been applied to this variable.
llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
-
-public:
- /// ObjCEHValueStack - Stack of Objective-C exception values, used for
- /// rethrows.
- llvm::SmallVector<llvm::Value*, 8> ObjCEHValueStack;
- /// PushCleanupBlock - Push a new cleanup entry on the stack and set the
- /// passed in block as the cleanup block.
- void PushCleanupBlock(llvm::BasicBlock *CleanupEntryBlock,
- llvm::BasicBlock *CleanupExitBlock,
- llvm::BasicBlock *PreviousInvokeDest,
- bool EHOnly = false);
- void PushCleanupBlock(llvm::BasicBlock *CleanupEntryBlock) {
- PushCleanupBlock(CleanupEntryBlock, 0, getInvokeDest(), false);
- }
-
- /// CleanupBlockInfo - A struct representing a popped cleanup block.
- struct CleanupBlockInfo {
- /// CleanupEntryBlock - the cleanup entry block
- llvm::BasicBlock *CleanupBlock;
+ EHScopeStack EHStack;
- /// SwitchBlock - the block (if any) containing the switch instruction used
- /// for jumping to the final destination.
- llvm::BasicBlock *SwitchBlock;
+ /// The exception slot. All landing pads write the current
+ /// exception pointer into this alloca.
+ llvm::Value *ExceptionSlot;
- /// EndBlock - the default destination for the switch instruction.
- llvm::BasicBlock *EndBlock;
+ /// Emits a landing pad for the current EH stack.
+ llvm::BasicBlock *EmitLandingPad();
- /// EHOnly - True iff this cleanup should only be performed on the
- /// exceptional edge.
- bool EHOnly;
+ llvm::BasicBlock *getInvokeDestImpl();
- CleanupBlockInfo(llvm::BasicBlock *cb, llvm::BasicBlock *sb,
- llvm::BasicBlock *eb, bool ehonly = false)
- : CleanupBlock(cb), SwitchBlock(sb), EndBlock(eb), EHOnly(ehonly) {}
- };
+public:
+ /// ObjCEHValueStack - Stack of Objective-C exception values, used for
+ /// rethrows.
+ llvm::SmallVector<llvm::Value*, 8> ObjCEHValueStack;
- /// EHCleanupBlock - RAII object that will create a cleanup block for the
- /// exceptional edge and set the insert point to that block. When destroyed,
- /// it creates the cleanup edge and sets the insert point to the previous
- /// block.
- class EHCleanupBlock {
- CodeGenFunction& CGF;
- llvm::BasicBlock *PreviousInsertionBlock;
- llvm::BasicBlock *CleanupHandler;
- llvm::BasicBlock *PreviousInvokeDest;
- public:
- EHCleanupBlock(CodeGenFunction &cgf)
- : CGF(cgf),
- PreviousInsertionBlock(CGF.Builder.GetInsertBlock()),
- CleanupHandler(CGF.createBasicBlock("ehcleanup", CGF.CurFn)),
- PreviousInvokeDest(CGF.getInvokeDest()) {
- llvm::BasicBlock *TerminateHandler = CGF.getTerminateHandler();
- CGF.Builder.SetInsertPoint(CleanupHandler);
- CGF.setInvokeDest(TerminateHandler);
- }
- ~EHCleanupBlock();
+ // A struct holding information about a finally block's IR
+ // generation. For now, doesn't actually hold anything.
+ struct FinallyInfo {
};
- /// PopCleanupBlock - Will pop the cleanup entry on the stack, process all
- /// branch fixups and return a block info struct with the switch block and end
- /// block. This will also reset the invoke handler to the previous value
- /// from when the cleanup block was created.
- CleanupBlockInfo PopCleanupBlock();
-
- /// DelayedCleanupBlock - RAII object that will create a cleanup block and set
- /// the insert point to that block. When destructed, it sets the insert point
- /// to the previous block and pushes a new cleanup entry on the stack.
- class DelayedCleanupBlock {
- CodeGenFunction& CGF;
- llvm::BasicBlock *CurBB;
- llvm::BasicBlock *CleanupEntryBB;
- llvm::BasicBlock *CleanupExitBB;
- llvm::BasicBlock *CurInvokeDest;
- bool EHOnly;
+ FinallyInfo EnterFinallyBlock(const Stmt *Stmt,
+ llvm::Constant *BeginCatchFn,
+ llvm::Constant *EndCatchFn,
+ llvm::Constant *RethrowFn);
+ void ExitFinallyBlock(FinallyInfo &FinallyInfo);
+
+ /// PushDestructorCleanup - Push a cleanup to call the
+ /// complete-object destructor of an object of the given type at the
+ /// given address. Does nothing if T is not a C++ class type with a
+ /// non-trivial destructor.
+ void PushDestructorCleanup(QualType T, llvm::Value *Addr);
+
+ /// PopCleanupBlock - Will pop the cleanup entry on the stack and
+ /// process all branch fixups.
+ void PopCleanupBlock();
+
+ /// CleanupBlock - RAII object that will create a cleanup block and
+ /// set the insert point to that block. When destructed, it sets the
+ /// insert point to the previous block and pushes a new cleanup
+ /// entry on the stack.
+ class CleanupBlock {
+ CodeGenFunction &CGF;
+ CGBuilderTy::InsertPoint SavedIP;
+ llvm::BasicBlock *NormalCleanupEntryBB;
+ llvm::BasicBlock *NormalCleanupExitBB;
+ llvm::BasicBlock *EHCleanupEntryBB;
public:
- DelayedCleanupBlock(CodeGenFunction &cgf, bool ehonly = false)
- : CGF(cgf), CurBB(CGF.Builder.GetInsertBlock()),
- CleanupEntryBB(CGF.createBasicBlock("cleanup")),
- CleanupExitBB(0),
- CurInvokeDest(CGF.getInvokeDest()),
- EHOnly(ehonly) {
- CGF.Builder.SetInsertPoint(CleanupEntryBB);
- }
+ CleanupBlock(CodeGenFunction &CGF, CleanupKind Kind);
- llvm::BasicBlock *getCleanupExitBlock() {
- if (!CleanupExitBB)
- CleanupExitBB = CGF.createBasicBlock("cleanup.exit");
- return CleanupExitBB;
- }
+ /// If we're currently writing a normal cleanup, tie that off and
+ /// start writing an EH cleanup.
+ void beginEHCleanup();
- ~DelayedCleanupBlock() {
- CGF.PushCleanupBlock(CleanupEntryBB, CleanupExitBB, CurInvokeDest,
- EHOnly);
- // FIXME: This is silly, move this into the builder.
- if (CurBB)
- CGF.Builder.SetInsertPoint(CurBB);
- else
- CGF.Builder.ClearInsertionPoint();
- }
+ ~CleanupBlock();
};
- /// \brief Enters a new scope for capturing cleanups, all of which will be
- /// executed once the scope is exited.
- class CleanupScope {
+ /// \brief Enters a new scope for capturing cleanups, all of which
+ /// will be executed once the scope is exited.
+ class RunCleanupsScope {
CodeGenFunction& CGF;
- size_t CleanupStackDepth;
+ EHScopeStack::stable_iterator CleanupStackDepth;
bool OldDidCallStackSave;
bool PerformCleanup;
- CleanupScope(const CleanupScope &); // DO NOT IMPLEMENT
- CleanupScope &operator=(const CleanupScope &); // DO NOT IMPLEMENT
+ RunCleanupsScope(const RunCleanupsScope &); // DO NOT IMPLEMENT
+ RunCleanupsScope &operator=(const RunCleanupsScope &); // DO NOT IMPLEMENT
public:
/// \brief Enter a new cleanup scope.
- explicit CleanupScope(CodeGenFunction &CGF)
+ explicit RunCleanupsScope(CodeGenFunction &CGF)
: CGF(CGF), PerformCleanup(true)
{
- CleanupStackDepth = CGF.CleanupEntries.size();
+ CleanupStackDepth = CGF.EHStack.stable_begin();
OldDidCallStackSave = CGF.DidCallStackSave;
}
/// \brief Exit this cleanup scope, emitting any accumulated
/// cleanups.
- ~CleanupScope() {
+ ~RunCleanupsScope() {
if (PerformCleanup) {
CGF.DidCallStackSave = OldDidCallStackSave;
- CGF.EmitCleanupBlocks(CleanupStackDepth);
+ CGF.PopCleanupBlocks(CleanupStackDepth);
}
}
/// \brief Determine whether this scope requires any cleanups.
bool requiresCleanups() const {
- return CGF.CleanupEntries.size() > CleanupStackDepth;
+ return CGF.EHStack.stable_begin() != CleanupStackDepth;
}
/// \brief Force the emission of cleanups now, instead of waiting
@@ -254,42 +518,39 @@ public:
void ForceCleanup() {
assert(PerformCleanup && "Already forced cleanup");
CGF.DidCallStackSave = OldDidCallStackSave;
- CGF.EmitCleanupBlocks(CleanupStackDepth);
+ CGF.PopCleanupBlocks(CleanupStackDepth);
PerformCleanup = false;
}
};
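
So a typical statement-emission routine brackets its work like this (a sketch):

  {
    CodeGenFunction::RunCleanupsScope Scope(CGF);
    // ... emit statements; locals with non-trivial destructors push
    // cleanups onto CGF.EHStack as they are created ...
  }  // ~RunCleanupsScope() calls PopCleanupBlocks(saved depth)
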
- /// CXXTemporariesCleanupScope - Enters a new scope for catching live
- /// temporaries, all of which will be popped once the scope is exited.
- class CXXTemporariesCleanupScope {
- CodeGenFunction &CGF;
- size_t NumLiveTemporaries;
-
- // DO NOT IMPLEMENT
- CXXTemporariesCleanupScope(const CXXTemporariesCleanupScope &);
- CXXTemporariesCleanupScope &operator=(const CXXTemporariesCleanupScope &);
-
- public:
- explicit CXXTemporariesCleanupScope(CodeGenFunction &CGF)
- : CGF(CGF), NumLiveTemporaries(CGF.LiveTemporaries.size()) { }
-
- ~CXXTemporariesCleanupScope() {
- while (CGF.LiveTemporaries.size() > NumLiveTemporaries)
- CGF.PopCXXTemporary();
- }
- };
+ /// PopCleanupBlocks - Takes the old cleanup stack size and emits
+ /// the cleanup blocks that have been added.
+ void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
- /// EmitCleanupBlocks - Takes the old cleanup stack size and emits the cleanup
- /// blocks that have been added.
- void EmitCleanupBlocks(size_t OldCleanupStackSize);
+ /// The given basic block lies in the current EH scope, but may be a
+ /// target of a potentially scope-crossing jump; get a stable handle
+ /// to which we can perform this jump later.
+ JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) const {
+ return JumpDest(Target, EHStack.stable_begin());
+ }
- /// EmitBranchThroughCleanup - Emit a branch from the current insert block
- /// through the cleanup handling code (if any) and then on to \arg Dest.
- ///
- /// FIXME: Maybe this should really be in EmitBranch? Don't we always want
- /// this behavior for branches?
- void EmitBranchThroughCleanup(llvm::BasicBlock *Dest);
+ /// The given basic block lies in the current EH scope, but may be a
+ /// target of a potentially scope-crossing jump; get a stable handle
+ /// to which we can perform this jump later.
+ JumpDest getJumpDestInCurrentScope(const char *Name = 0) {
+ return JumpDest(createBasicBlock(Name), EHStack.stable_begin());
+ }
+
+ /// EmitBranchThroughCleanup - Emit a branch from the current insert
+ /// block through the normal cleanup handling code (if any) and then
+ /// on to \arg Dest.
+ void EmitBranchThroughCleanup(JumpDest Dest);
+
+ /// EmitBranchThroughEHCleanup - Emit a branch from the current
+ /// insert block through the EH cleanup handling code (if any) and
+ /// then on to \arg Dest.
+ void EmitBranchThroughEHCleanup(JumpDest Dest);
/// BeginConditionalBranch - Should be called before a conditional part of an
/// expression is emitted. For example, before the RHS of the expression below
@@ -326,16 +587,16 @@ private:
llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
/// LabelMap - This keeps track of the LLVM basic block for each C label.
- llvm::DenseMap<const LabelStmt*, llvm::BasicBlock*> LabelMap;
+ llvm::DenseMap<const LabelStmt*, JumpDest> LabelMap;
// BreakContinueStack - This keeps track of where break and continue
// statements should jump to.
struct BreakContinue {
- BreakContinue(llvm::BasicBlock *bb, llvm::BasicBlock *cb)
- : BreakBlock(bb), ContinueBlock(cb) {}
+ BreakContinue(JumpDest Break, JumpDest Continue)
+ : BreakBlock(Break), ContinueBlock(Continue) {}
- llvm::BasicBlock *BreakBlock;
- llvm::BasicBlock *ContinueBlock;
+ JumpDest BreakBlock;
+ JumpDest ContinueBlock;
};
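
For illustration, a loop-emission routine would now record its targets as JumpDests (hypothetical glue code, but using the helpers declared in this patch):

  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
  // ... emit the loop body; a 'break' statement then lowers to:
  //   EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
  BreakContinueStack.pop_back();
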
llvm::SmallVector<BreakContinue, 8> BreakContinueStack;
@@ -363,44 +624,9 @@ private:
/// calling llvm.stacksave for multiple VLAs in the same scope.
bool DidCallStackSave;
- struct CleanupEntry {
- /// CleanupEntryBlock - The block of code that does the actual cleanup.
- llvm::BasicBlock *CleanupEntryBlock;
-
- /// CleanupExitBlock - The cleanup exit block.
- llvm::BasicBlock *CleanupExitBlock;
-
- /// Blocks - Basic blocks that were emitted in the current cleanup scope.
- std::vector<llvm::BasicBlock *> Blocks;
-
- /// BranchFixups - Branch instructions to basic blocks that haven't been
- /// inserted into the current function yet.
- std::vector<llvm::BranchInst *> BranchFixups;
-
- /// PreviousInvokeDest - The invoke handler from the start of the cleanup
- /// region.
- llvm::BasicBlock *PreviousInvokeDest;
-
- /// EHOnly - Perform this only on the exceptional edge, not the main edge.
- bool EHOnly;
-
- explicit CleanupEntry(llvm::BasicBlock *CleanupEntryBlock,
- llvm::BasicBlock *CleanupExitBlock,
- llvm::BasicBlock *PreviousInvokeDest,
- bool ehonly)
- : CleanupEntryBlock(CleanupEntryBlock),
- CleanupExitBlock(CleanupExitBlock),
- PreviousInvokeDest(PreviousInvokeDest),
- EHOnly(ehonly) {}
- };
-
- /// CleanupEntries - Stack of cleanup entries.
- llvm::SmallVector<CleanupEntry, 8> CleanupEntries;
-
- typedef llvm::DenseMap<llvm::BasicBlock*, size_t> BlockScopeMap;
-
- /// BlockScopes - Map of which "cleanup scope" scope basic blocks have.
- BlockScopeMap BlockScopes;
+ /// A block containing a single 'unreachable' instruction. Created
+ /// lazily by getUnreachableBlock().
+ llvm::BasicBlock *UnreachableBlock;
/// CXXThisDecl - When generating code for a C++ member function,
/// this will hold the implicit 'this' declaration.
@@ -413,31 +639,6 @@ private:
ImplicitParamDecl *CXXVTTDecl;
llvm::Value *CXXVTTValue;
- /// CXXLiveTemporaryInfo - Holds information about a live C++ temporary.
- struct CXXLiveTemporaryInfo {
- /// Temporary - The live temporary.
- const CXXTemporary *Temporary;
-
- /// ThisPtr - The pointer to the temporary.
- llvm::Value *ThisPtr;
-
- /// DtorBlock - The destructor block.
- llvm::BasicBlock *DtorBlock;
-
- /// CondPtr - If this is a conditional temporary, this is the pointer to the
- /// condition variable that states whether the destructor should be called
- /// or not.
- llvm::Value *CondPtr;
-
- CXXLiveTemporaryInfo(const CXXTemporary *temporary,
- llvm::Value *thisptr, llvm::BasicBlock *dtorblock,
- llvm::Value *condptr)
- : Temporary(temporary), ThisPtr(thisptr), DtorBlock(dtorblock),
- CondPtr(condptr) { }
- };
-
- llvm::SmallVector<CXXLiveTemporaryInfo, 4> LiveTemporaries;
-
/// ConditionalBranchLevel - Contains the nesting level of the current
/// conditional branch. This is used so that we know if a temporary should be
/// destroyed conditionally.
@@ -453,18 +654,32 @@ private:
/// number that holds the value.
unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
+ llvm::BasicBlock *TerminateLandingPad;
llvm::BasicBlock *TerminateHandler;
llvm::BasicBlock *TrapBB;
- int UniqueAggrDestructorCount;
public:
CodeGenFunction(CodeGenModule &cgm);
ASTContext &getContext() const;
CGDebugInfo *getDebugInfo() { return DebugInfo; }
- llvm::BasicBlock *getInvokeDest() { return InvokeDest; }
- void setInvokeDest(llvm::BasicBlock *B) { InvokeDest = B; }
+ /// Returns a pointer to the function's exception object slot, which
+ /// is assigned in every landing pad.
+ llvm::Value *getExceptionSlot();
+
+ llvm::BasicBlock *getUnreachableBlock() {
+ if (!UnreachableBlock) {
+ UnreachableBlock = createBasicBlock("unreachable");
+ new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
+ }
+ return UnreachableBlock;
+ }
+
+ llvm::BasicBlock *getInvokeDest() {
+ if (!EHStack.requiresLandingPad()) return 0;
+ return getInvokeDestImpl();
+ }
llvm::LLVMContext &getLLVMContext() { return VMContext; }
@@ -501,7 +716,8 @@ public:
const llvm::StructType *,
std::vector<HelperInfo> *);
- llvm::Function *GenerateBlockFunction(const BlockExpr *BExpr,
+ llvm::Function *GenerateBlockFunction(GlobalDecl GD,
+ const BlockExpr *BExpr,
CGBlockInfo &Info,
const Decl *OuterFuncDecl,
llvm::DenseMap<const Decl*, llvm::Value*> ldm);
@@ -567,6 +783,15 @@ public:
void EmitDtorEpilogue(const CXXDestructorDecl *Dtor,
CXXDtorType Type);
+ /// ShouldInstrumentFunction - Return true if the current function should be
+ /// instrumented with __cyg_profile_func_* calls.
+ bool ShouldInstrumentFunction();
+
+ /// EmitFunctionInstrumentation - Emit LLVM code to call the specified
+ /// instrumentation function with the current function and the call site, if
+ /// function instrumentation is enabled.
+ void EmitFunctionInstrumentation(const char *Fn);
+
/// EmitFunctionProlog - Emit the target specific LLVM code to load the
/// arguments for the given function. This is also responsible for naming the
/// LLVM function arguments.
@@ -576,7 +801,7 @@ public:
/// EmitFunctionEpilog - Emit the target specific LLVM code to return the
/// given temporary.
- void EmitFunctionEpilog(const CGFunctionInfo &FI, llvm::Value *ReturnValue);
+ void EmitFunctionEpilog(const CGFunctionInfo &FI);
/// EmitStartEHSpec - Emit the start of the exception spec.
void EmitStartEHSpec(const Decl *D);
@@ -584,7 +809,12 @@ public:
/// EmitEndEHSpec - Emit the end of the exception spec.
void EmitEndEHSpec(const Decl *D);
- /// getTerminateHandler - Return a handler that just calls terminate.
+ /// getTerminateLandingPad - Return a landing pad that just calls terminate.
+ llvm::BasicBlock *getTerminateLandingPad();
+
+ /// getTerminateHandler - Return a handler (not a landing pad, just
+ /// a catch handler) that just calls terminate. This is used when
+ /// a terminate scope encloses a try.
llvm::BasicBlock *getTerminateHandler();
const llvm::Type *ConvertTypeForMem(QualType T);
@@ -617,7 +847,7 @@ public:
/// getBasicBlockForLabel - Return the LLVM basicblock that the specified
/// label maps to.
- llvm::BasicBlock *getBasicBlockForLabel(const LabelStmt *S);
+ JumpDest getJumpDestForLabel(const LabelStmt *S);
/// SimplifyForwardingBlocks - If the given basic block is only a branch to
/// another basic block, simplify it. This assumes that no other code could
@@ -688,11 +918,11 @@ public:
/// value needs to be stored into an alloca (for example, to avoid explicit
/// PHI construction), but the type is the IR type, not the type appropriate
/// for storing in memory.
- llvm::Value *CreateIRTemp(QualType T, const llvm::Twine &Name = "tmp");
+ llvm::AllocaInst *CreateIRTemp(QualType T, const llvm::Twine &Name = "tmp");
/// CreateMemTemp - Create a temporary memory object of the given type, with
/// appropriate alignment.
- llvm::Value *CreateMemTemp(QualType T, const llvm::Twine &Name = "tmp");
+ llvm::AllocaInst *CreateMemTemp(QualType T, const llvm::Twine &Name = "tmp");
/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
@@ -835,15 +1065,17 @@ public:
llvm::Value *NumElements,
llvm::Value *This);
- llvm::Constant *GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
- const ArrayType *Array,
- llvm::Value *This);
+ llvm::Function *GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
+ const ArrayType *Array,
+ llvm::Value *This);
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
bool ForVirtualBase, llvm::Value *This);
+
+ void EmitNewArrayInitializer(const CXXNewExpr *E, llvm::Value *NewPtr,
+ llvm::Value *NumElements);
- void PushCXXTemporary(const CXXTemporary *Temporary, llvm::Value *Ptr);
- void PopCXXTemporary();
+ void EmitCXXTemporary(const CXXTemporary *Temporary, llvm::Value *Ptr);
llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
@@ -874,10 +1106,13 @@ public:
/// This function can be called with a null (unreachable) insert point.
void EmitBlockVarDecl(const VarDecl &D);
+ typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
+ llvm::Value *Address);
+
/// EmitLocalBlockVarDecl - Emit a local block variable declaration.
///
/// This function can be called with a null (unreachable) insert point.
- void EmitLocalBlockVarDecl(const VarDecl &D);
+ void EmitLocalBlockVarDecl(const VarDecl &D, SpecialInitFn *SpecialInit = 0);
void EmitStaticBlockVarDecl(const VarDecl &D,
llvm::GlobalValue::LinkageTypes Linkage);
@@ -938,13 +1173,8 @@ public:
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
llvm::Constant *getUnwindResumeOrRethrowFn();
- struct CXXTryStmtInfo {
- llvm::BasicBlock *SavedLandingPad;
- llvm::BasicBlock *HandlerBlock;
- llvm::BasicBlock *FinallyBlock;
- };
- CXXTryStmtInfo EnterCXXTryStmt(const CXXTryStmt &S);
- void ExitCXXTryStmt(const CXXTryStmt &S, CXXTryStmtInfo Info);
+ void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
+ void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
void EmitCXXTryStmt(const CXXTryStmt &S);
@@ -1050,7 +1280,7 @@ public:
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
LValue EmitConditionalOperatorLValue(const ConditionalOperator *E);
LValue EmitCastLValue(const CastExpr *E);
- LValue EmitNullInitializationLValue(const CXXZeroInitValueExpr *E);
+ LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E);
llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
@@ -1088,6 +1318,7 @@ public:
LValue EmitObjCSuperExprLValue(const ObjCSuperExpr *E);
LValue EmitStmtExprLValue(const StmtExpr *E);
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
+ LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
//===--------------------------------------------------------------------===//
// Scalar Expression Emission
@@ -1114,6 +1345,11 @@ public:
RValue EmitCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue = ReturnValueSlot());
+ llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
+ llvm::Value * const *ArgBegin,
+ llvm::Value * const *ArgEnd,
+ const llvm::Twine &Name = "");
+
llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
const llvm::Type *Ty);
llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
@@ -1146,6 +1382,14 @@ public:
llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitNeonCall(llvm::Function *F,
+ llvm::SmallVectorImpl<llvm::Value*> &O,
+ const char *name, bool splat = false,
+ unsigned shift = 0, bool rightshift = false);
+ llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
+ llvm::Value *EmitNeonShiftVector(llvm::Value *V, const llvm::Type *Ty,
+ bool negateForRightShift);
+
llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
@@ -1164,7 +1408,8 @@ public:
/// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
/// expression. Will emit a temporary variable if E is not an LValue.
- RValue EmitReferenceBindingToExpr(const Expr* E, bool IsInitializer = false);
+ RValue EmitReferenceBindingToExpr(const Expr* E,
+ const NamedDecl *InitializedDecl);
//===--------------------------------------------------------------------===//
// Expression Emission
@@ -1260,7 +1505,7 @@ public:
/// GenerateCXXGlobalDtorFunc - Generates code for destroying global
/// variables.
void GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
- const std::vector<std::pair<llvm::Constant*,
+ const std::vector<std::pair<llvm::WeakVH,
llvm::Constant*> > &DtorsAndObjects);
void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D);
@@ -1308,7 +1553,6 @@ public:
RValue EmitDelegateCallArg(const VarDecl *Param);
private:
-
void EmitReturnOfRValue(RValue RV, QualType Ty);
/// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
@@ -1331,13 +1575,6 @@ private:
const TargetInfo::ConstraintInfo &Info,
const Expr *InputExpr, std::string &ConstraintStr);
- /// EmitCleanupBlock - emits a single cleanup block.
- void EmitCleanupBlock();
-
- /// AddBranchFixup - adds a branch instruction to the list of fixups for the
- /// current cleanup scope.
- void AddBranchFixup(llvm::BranchInst *BI);
-
/// EmitCallArgs - Emit call arguments for a function.
/// The CallArgTypeInfo parameter is used for iterating over the known
/// argument types of the function being called.
@@ -1381,6 +1618,8 @@ private:
const TargetCodeGenInfo &getTargetHooks() const {
return CGM.getTargetCodeGenInfo();
}
+
+ void EmitDeclMetadata();
};
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
index 103024c..bf606a6 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
@@ -18,11 +18,12 @@
#include "CGObjCRuntime.h"
#include "Mangle.h"
#include "TargetInfo.h"
-#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/Diagnostic.h"
@@ -86,8 +87,10 @@ void CodeGenModule::createObjCRuntime() {
}
void CodeGenModule::createCXXABI() {
- // For now, just create an Itanium ABI.
- ABI = CreateItaniumCXXABI(*this);
+ if (Context.Target.getCXXABI() == "microsoft")
+ ABI = CreateMicrosoftCXXABI(*this);
+ else
+ ABI = CreateItaniumCXXABI(*this);
}
void CodeGenModule::Release() {
@@ -101,6 +104,9 @@ void CodeGenModule::Release() {
EmitCtorList(GlobalDtors, "llvm.global_dtors");
EmitAnnotations();
EmitLLVMUsed();
+
+ if (getCodeGenOpts().EmitDeclMetadata)
+ EmitDeclMetadata();
}
bool CodeGenModule::isTargetDarwin() const {
@@ -149,7 +155,38 @@ CodeGenModule::getDeclVisibilityMode(const Decl *D) const {
return LangOptions::Protected;
}
}
+
+ if (getLangOptions().CPlusPlus) {
+ // Entities subject to an explicit instantiation declaration get default
+ // visibility.
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
+ if (Function->getTemplateSpecializationKind()
+ == TSK_ExplicitInstantiationDeclaration)
+ return LangOptions::Default;
+ } else if (const ClassTemplateSpecializationDecl *ClassSpec
+ = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
+ if (ClassSpec->getSpecializationKind()
+ == TSK_ExplicitInstantiationDeclaration)
+ return LangOptions::Default;
+ } else if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) {
+ if (Record->getTemplateSpecializationKind()
+ == TSK_ExplicitInstantiationDeclaration)
+ return LangOptions::Default;
+ } else if (const VarDecl *Var = dyn_cast<VarDecl>(D)) {
+ if (Var->isStaticDataMember() &&
+ (Var->getTemplateSpecializationKind()
+ == TSK_ExplicitInstantiationDeclaration))
+ return LangOptions::Default;
+ }
+ // If -fvisibility-inlines-hidden was provided, then inline C++ member
+ // functions get "hidden" visibility by default.
+ if (getLangOptions().InlineVisibilityHidden)
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
+ if (Method->isInlined())
+ return LangOptions::Hidden;
+ }
+
// This decl should have the same visibility as its parent.
if (const DeclContext *DC = D->getDeclContext())
return getDeclVisibilityMode(cast<Decl>(DC));
@@ -176,32 +213,44 @@ void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
}
}
-void CodeGenModule::getMangledName(MangleBuffer &Buffer, GlobalDecl GD) {
+llvm::StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
- if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND))
- return getMangledCXXCtorName(Buffer, D, GD.getCtorType());
- if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND))
- return getMangledCXXDtorName(Buffer, D, GD.getDtorType());
-
- return getMangledName(Buffer, ND);
-}
+ llvm::StringRef &Str = MangledDeclNames[GD.getCanonicalDecl()];
+ if (!Str.empty())
+ return Str;
-/// \brief Retrieves the mangled name for the given declaration.
-///
-/// If the given declaration requires a mangled name, returns an
-/// const char* containing the mangled name. Otherwise, returns
-/// the unmangled name.
-///
-void CodeGenModule::getMangledName(MangleBuffer &Buffer,
- const NamedDecl *ND) {
if (!getMangleContext().shouldMangleDeclName(ND)) {
- assert(ND->getIdentifier() && "Attempt to mangle unnamed decl.");
- Buffer.setString(ND->getNameAsCString());
- return;
+ IdentifierInfo *II = ND->getIdentifier();
+ assert(II && "Attempt to mangle unnamed decl.");
+
+ Str = II->getName();
+ return Str;
}
+
+ llvm::SmallString<256> Buffer;
+ if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND))
+ getMangleContext().mangleCXXCtor(D, GD.getCtorType(), Buffer);
+ else if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND))
+ getMangleContext().mangleCXXDtor(D, GD.getDtorType(), Buffer);
+ else if (const BlockDecl *BD = dyn_cast<BlockDecl>(ND))
+ getMangleContext().mangleBlock(GD, BD, Buffer);
+ else
+ getMangleContext().mangleName(ND, Buffer);
+
+ // Allocate space for the mangled name.
+ size_t Length = Buffer.size();
+ char *Name = MangledNamesAllocator.Allocate<char>(Length);
+ std::copy(Buffer.begin(), Buffer.end(), Name);
+
+ Str = llvm::StringRef(Name, Length);
+
+ return Str;
+}
- getMangleContext().mangleName(ND, Buffer.getBuffer());
+void CodeGenModule::getMangledName(GlobalDecl GD, MangleBuffer &Buffer,
+ const BlockDecl *BD) {
+ getMangleContext().mangleBlock(GD, BD, Buffer.getBuffer());
}
llvm::GlobalValue *CodeGenModule::GetGlobalValue(llvm::StringRef Name) {
@@ -333,35 +382,39 @@ llvm::GlobalValue::LinkageTypes
CodeGenModule::getFunctionLinkage(const FunctionDecl *D) {
GVALinkage Linkage = GetLinkageForFunction(getContext(), D, Features);
- if (Linkage == GVA_Internal) {
+ if (Linkage == GVA_Internal)
return llvm::Function::InternalLinkage;
- } else if (D->hasAttr<DLLExportAttr>()) {
+
+ if (D->hasAttr<DLLExportAttr>())
return llvm::Function::DLLExportLinkage;
- } else if (D->hasAttr<WeakAttr>()) {
+
+ if (D->hasAttr<WeakAttr>())
return llvm::Function::WeakAnyLinkage;
- } else if (Linkage == GVA_C99Inline) {
- // In C99 mode, 'inline' functions are guaranteed to have a strong
- // definition somewhere else, so we can use available_externally linkage.
+
+ // In C99 mode, 'inline' functions are guaranteed to have a strong
+ // definition somewhere else, so we can use available_externally linkage.
+ if (Linkage == GVA_C99Inline)
return llvm::Function::AvailableExternallyLinkage;
- } else if (Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation) {
- // In C++, the compiler has to emit a definition in every translation unit
- // that references the function. We should use linkonce_odr because
- // a) if all references in this translation unit are optimized away, we
- // don't need to codegen it. b) if the function persists, it needs to be
- // merged with other definitions. c) C++ has the ODR, so we know the
- // definition is dependable.
+
+ // In C++, the compiler has to emit a definition in every translation unit
+ // that references the function. We should use linkonce_odr because
+ // a) if all references in this translation unit are optimized away, we
+ // don't need to codegen it. b) if the function persists, it needs to be
+ // merged with other definitions. c) C++ has the ODR, so we know the
+ // definition is dependable.
+ if (Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation)
return llvm::Function::LinkOnceODRLinkage;
- } else if (Linkage == GVA_ExplicitTemplateInstantiation) {
- // An explicit instantiation of a template has weak linkage, since
- // explicit instantiations can occur in multiple translation units
- // and must all be equivalent. However, we are not allowed to
- // throw away these explicit instantiations.
+
+ // An explicit instantiation of a template has weak linkage, since
+ // explicit instantiations can occur in multiple translation units
+ // and must all be equivalent. However, we are not allowed to
+ // throw away these explicit instantiations.
+ if (Linkage == GVA_ExplicitTemplateInstantiation)
return llvm::Function::WeakODRLinkage;
- } else {
- assert(Linkage == GVA_StrongExternal);
- // Otherwise, we have strong external linkage.
- return llvm::Function::ExternalLinkage;
- }
+
+ // Otherwise, we have strong external linkage.
+ assert(Linkage == GVA_StrongExternal);
+ return llvm::Function::ExternalLinkage;
}
@@ -521,8 +574,7 @@ void CodeGenModule::EmitDeferred() {
// ignore these cases.
//
// TODO: That said, looking this up multiple times is very wasteful.
- MangleBuffer Name;
- getMangledName(Name, D);
+ llvm::StringRef Name = getMangledName(D);
llvm::GlobalValue *CGRef = GetGlobalValue(Name);
assert(CGRef && "Deferred decl wasn't referenced?");
@@ -586,6 +638,47 @@ llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
return llvm::ConstantStruct::get(VMContext, Fields, 4, false);
}
+static CodeGenModule::GVALinkage
+GetLinkageForVariable(ASTContext &Context, const VarDecl *VD) {
+ // If this is a static data member, compute the kind of template
+ // specialization. Otherwise, this variable is not part of a
+ // template.
+ TemplateSpecializationKind TSK = TSK_Undeclared;
+ if (VD->isStaticDataMember())
+ TSK = VD->getTemplateSpecializationKind();
+
+ Linkage L = VD->getLinkage();
+ if (L == ExternalLinkage && Context.getLangOptions().CPlusPlus &&
+ VD->getType()->getLinkage() == UniqueExternalLinkage)
+ L = UniqueExternalLinkage;
+
+ switch (L) {
+ case NoLinkage:
+ case InternalLinkage:
+ case UniqueExternalLinkage:
+ return CodeGenModule::GVA_Internal;
+
+ case ExternalLinkage:
+ switch (TSK) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ return CodeGenModule::GVA_StrongExternal;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ llvm_unreachable("Variable should not be instantiated");
+ // Fall through to treat this like any other instantiation.
+
+ case TSK_ExplicitInstantiationDefinition:
+ return CodeGenModule::GVA_ExplicitTemplateInstantiation;
+
+ case TSK_ImplicitInstantiation:
+ return CodeGenModule::GVA_TemplateInstantiation;
+ }
+ }
+
+ return CodeGenModule::GVA_StrongExternal;
+}
+
bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
  // Never defer when EmitAllDecls is specified or the decl has the
  // 'used' attribute.
@@ -634,24 +727,10 @@ bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
}
}
- // Static data may be deferred, but out-of-line static data members
- // cannot be.
- Linkage L = VD->getLinkage();
- if (L == ExternalLinkage && getContext().getLangOptions().CPlusPlus &&
- VD->getType()->getLinkage() == UniqueExternalLinkage)
- L = UniqueExternalLinkage;
-
- switch (L) {
- case NoLinkage:
- case InternalLinkage:
- case UniqueExternalLinkage:
- // Initializer has side effects?
- if (VD->getInit() && VD->getInit()->HasSideEffects(Context))
- return false;
- return !(VD->isStaticDataMember() && VD->isOutOfLine());
-
- case ExternalLinkage:
- break;
+ GVALinkage L = GetLinkageForVariable(getContext(), VD);
+ if (L == GVA_Internal || L == GVA_TemplateInstantiation) {
+ if (!(VD->getInit() && VD->getInit()->HasSideEffects(Context)))
+ return true;
}
return false;
@@ -716,8 +795,7 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
// If the value has already been used, add it directly to the
// DeferredDeclsToEmit list.
- MangleBuffer MangledName;
- getMangledName(MangledName, GD);
+ llvm::StringRef MangledName = getMangledName(GD);
if (GetGlobalValue(MangledName))
DeferredDeclsToEmit.push_back(GD);
else {
@@ -735,18 +813,27 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
Context.getSourceManager(),
"Generating code for declaration");
- if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
- if (Method->isVirtual())
- getVTables().EmitThunks(GD);
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
+ // At -O0, don't generate IR for functions with available_externally
+ // linkage.
+ if (CodeGenOpts.OptimizationLevel == 0 &&
+ getFunctionLinkage(Function)
+ == llvm::Function::AvailableExternallyLinkage)
+ return;
+
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (Method->isVirtual())
+ getVTables().EmitThunks(GD);
- if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
- return EmitCXXConstructor(CD, GD.getCtorType());
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(Method))
+ return EmitCXXConstructor(CD, GD.getCtorType());
- if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D))
- return EmitCXXDestructor(DD, GD.getDtorType());
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(Method))
+ return EmitCXXDestructor(DD, GD.getDtorType());
+ }
- if (isa<FunctionDecl>(D))
return EmitGlobalFunctionDefinition(GD);
+ }
if (const VarDecl *VD = dyn_cast<VarDecl>(D))
return EmitGlobalVarDefinition(VD);
@@ -797,6 +884,7 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
std::vector<const llvm::Type*>(), false);
IsIncompleteFunction = true;
}
+
llvm::Function *F = llvm::Function::Create(FTy,
llvm::Function::ExternalLinkage,
MangledName, &getModule());
@@ -857,8 +945,8 @@ llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
// If there was no specific requested type, just convert it now.
if (!Ty)
Ty = getTypes().ConvertType(cast<ValueDecl>(GD.getDecl())->getType());
- MangleBuffer MangledName;
- getMangledName(MangledName, GD);
+
+ llvm::StringRef MangledName = getMangledName(GD);
return GetOrCreateLLVMFunction(MangledName, Ty, GD);
}
@@ -961,8 +1049,7 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
const llvm::PointerType *PTy =
llvm::PointerType::get(Ty, ASTTy.getAddressSpace());
- MangleBuffer MangledName;
- getMangledName(MangledName, D);
+ llvm::StringRef MangledName = getMangledName(D);
return GetOrCreateLLVMGlobal(MangledName, PTy, D);
}
@@ -981,8 +1068,7 @@ void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
// If we have not seen a reference to this variable yet, place it
// into the deferred declarations table to be emitted if needed
// later.
- MangleBuffer MangledName;
- getMangledName(MangledName, D);
+ llvm::StringRef MangledName = getMangledName(D);
if (!GetGlobalValue(MangledName)) {
DeferredDecls[MangledName] = D;
return;
@@ -1008,7 +1094,7 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
// If this class has a key function, use that to determine the linkage of
// the vtable.
const FunctionDecl *Def = 0;
- if (KeyFunction->getBody(Def))
+ if (KeyFunction->hasBody(Def))
KeyFunction = cast<CXXMethodDecl>(Def);
switch (KeyFunction->getTemplateSpecializationKind()) {
@@ -1049,47 +1135,6 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
return llvm::GlobalVariable::WeakODRLinkage;
}
-static CodeGenModule::GVALinkage
-GetLinkageForVariable(ASTContext &Context, const VarDecl *VD) {
- // If this is a static data member, compute the kind of template
- // specialization. Otherwise, this variable is not part of a
- // template.
- TemplateSpecializationKind TSK = TSK_Undeclared;
- if (VD->isStaticDataMember())
- TSK = VD->getTemplateSpecializationKind();
-
- Linkage L = VD->getLinkage();
- if (L == ExternalLinkage && Context.getLangOptions().CPlusPlus &&
- VD->getType()->getLinkage() == UniqueExternalLinkage)
- L = UniqueExternalLinkage;
-
- switch (L) {
- case NoLinkage:
- case InternalLinkage:
- case UniqueExternalLinkage:
- return CodeGenModule::GVA_Internal;
-
- case ExternalLinkage:
- switch (TSK) {
- case TSK_Undeclared:
- case TSK_ExplicitSpecialization:
- return CodeGenModule::GVA_StrongExternal;
-
- case TSK_ExplicitInstantiationDeclaration:
- llvm_unreachable("Variable should not be instantiated");
- // Fall through to treat this like any other instantiation.
-
- case TSK_ExplicitInstantiationDefinition:
- return CodeGenModule::GVA_ExplicitTemplateInstantiation;
-
- case TSK_ImplicitInstantiation:
- return CodeGenModule::GVA_TemplateInstantiation;
- }
- }
-
- return CodeGenModule::GVA_StrongExternal;
-}
-
CharUnits CodeGenModule::GetTargetTypeStoreSize(const llvm::Type *Ty) const {
return CharUnits::fromQuantity(
TheTargetData.getTypeStoreSizeInBits(Ty) / Context.getCharWidth());
@@ -1367,8 +1412,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
const AliasAttr *AA = D->getAttr<AliasAttr>();
assert(AA && "Not an alias?");
- MangleBuffer MangledName;
- getMangledName(MangledName, GD);
+ llvm::StringRef MangledName = getMangledName(GD);
// If there is a definition in the module, then it wins over the alias.
// This is dubious, but allow it to be safe. Just ignore the alias.
@@ -1409,7 +1453,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
Entry->getType()));
Entry->eraseFromParent();
} else {
- GA->setName(MangledName.getString());
+ GA->setName(MangledName);
}
// Set attributes which are particular to an alias; this is a
@@ -1418,7 +1462,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
if (D->hasAttr<DLLExportAttr>()) {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// The dllexport attribute is ignored for undefined symbols.
- if (FD->getBody())
+ if (FD->hasBody())
GA->setLinkage(llvm::Function::DLLExportLinkage);
} else {
GA->setLinkage(llvm::Function::DLLExportLinkage);
@@ -2004,3 +2048,73 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
assert(isa<TypeDecl>(D) && "Unsupported decl kind");
}
}
+
+/// Turns the given pointer into a constant.
+static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context,
+ const void *Ptr) {
+ uintptr_t PtrInt = reinterpret_cast<uintptr_t>(Ptr);
+ const llvm::Type *i64 = llvm::Type::getInt64Ty(Context);
+ return llvm::ConstantInt::get(i64, PtrInt);
+}
+
+static void EmitGlobalDeclMetadata(CodeGenModule &CGM,
+ llvm::NamedMDNode *&GlobalMetadata,
+ GlobalDecl D,
+ llvm::GlobalValue *Addr) {
+ if (!GlobalMetadata)
+ GlobalMetadata =
+ CGM.getModule().getOrInsertNamedMetadata("clang.global.decl.ptrs");
+
+ // TODO: should we report variant information for ctors/dtors?
+ llvm::Value *Ops[] = {
+ Addr,
+ GetPointerConstant(CGM.getLLVMContext(), D.getDecl())
+ };
+ GlobalMetadata->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops, 2));
+}
+
+/// Emits metadata nodes associating all the global values in the
+/// current module with the Decls they came from. This is useful for
+/// projects using IR gen as a subroutine.
+///
+/// Since there's currently no way to associate an MDNode directly
+/// with an llvm::GlobalValue, we create a module-level named metadata
+/// node with the name 'clang.global.decl.ptrs'.
+void CodeGenModule::EmitDeclMetadata() {
+ llvm::NamedMDNode *GlobalMetadata = 0;
+
+  // MangledDeclNames
+ for (llvm::DenseMap<GlobalDecl,llvm::StringRef>::iterator
+ I = MangledDeclNames.begin(), E = MangledDeclNames.end();
+ I != E; ++I) {
+ llvm::GlobalValue *Addr = getModule().getNamedValue(I->second);
+ EmitGlobalDeclMetadata(*this, GlobalMetadata, I->first, Addr);
+ }
+}
+
+/// Emits metadata nodes for all the local variables in the current
+/// function.
+void CodeGenFunction::EmitDeclMetadata() {
+ if (LocalDeclMap.empty()) return;
+
+ llvm::LLVMContext &Context = getLLVMContext();
+
+ // Find the unique metadata ID for this name.
+ unsigned DeclPtrKind = Context.getMDKindID("clang.decl.ptr");
+
+ llvm::NamedMDNode *GlobalMetadata = 0;
+
+ for (llvm::DenseMap<const Decl*, llvm::Value*>::iterator
+ I = LocalDeclMap.begin(), E = LocalDeclMap.end(); I != E; ++I) {
+ const Decl *D = I->first;
+ llvm::Value *Addr = I->second;
+
+ if (llvm::AllocaInst *Alloca = dyn_cast<llvm::AllocaInst>(Addr)) {
+ llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D);
+ Alloca->setMetadata(DeclPtrKind, llvm::MDNode::get(Context, &DAddr, 1));
+ } else if (llvm::GlobalValue *GV = dyn_cast<llvm::GlobalValue>(Addr)) {
+ GlobalDecl GD = GlobalDecl(cast<VarDecl>(D));
+ EmitGlobalDeclMetadata(CGM, GlobalMetadata, GD, GV);
+ }
+ }
+}
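
The two EmitDeclMetadata routines record each emitted value alongside the in-process address of its clang Decl, the global ones as operand pairs in the 'clang.global.decl.ptrs' named metadata. A consumer embedding IR gen could walk those pairs back out roughly like this (a sketch against the LLVM API of this era, where MDNode operands are plain Values; the function name is illustrative):

    #include "llvm/Module.h"
    #include "llvm/Metadata.h"
    #include "llvm/Constants.h"
    #include "llvm/GlobalValue.h"
    #include "llvm/Support/Casting.h"
    #include <stdint.h>

    // Sketch: recover the (GlobalValue, Decl address) pairs recorded by
    // CodeGenModule::EmitDeclMetadata.
    void walkDeclPtrs(llvm::Module &M) {
      llvm::NamedMDNode *MD = M.getNamedMetadata("clang.global.decl.ptrs");
      if (!MD) return;
      for (unsigned i = 0, e = MD->getNumOperands(); i != e; ++i) {
        llvm::MDNode *Pair = MD->getOperand(i);
        llvm::GlobalValue *GV =
            llvm::cast<llvm::GlobalValue>(Pair->getOperand(0));
        uint64_t DeclAddr =
            llvm::cast<llvm::ConstantInt>(Pair->getOperand(1))->getZExtValue();
        // DeclAddr is only meaningful inside the process that ran IR gen.
        (void)GV; (void)DeclAddr;
      }
    }
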
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
index 319744c4..27f15fc 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
@@ -75,6 +75,25 @@ namespace CodeGen {
class CGObjCRuntime;
class MangleBuffer;
+ struct OrderGlobalInits {
+ unsigned int priority;
+ unsigned int lex_order;
+ OrderGlobalInits(unsigned int p, unsigned int l)
+ : priority(p), lex_order(l) {}
+
+ bool operator==(const OrderGlobalInits &RHS) const {
+ return priority == RHS.priority &&
+ lex_order == RHS.lex_order;
+ }
+
+ bool operator<(const OrderGlobalInits &RHS) const {
+ if (priority < RHS.priority)
+ return true;
+
+ return priority == RHS.priority && lex_order < RHS.lex_order;
+ }
+ };
+
/// CodeGenModule - This class organizes the cross-function state that is used
/// while generating LLVM code.
class CodeGenModule : public BlockModule {
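
OrderGlobalInits gives prioritized initializers a strict weak ordering: lower init_priority values run first, with ties broken by lexical (declaration) order. A self-contained sketch of how entries keyed this way sort (the key struct mirrors the one above; the payload strings are illustrative):

    #include <algorithm>
    #include <cstdio>
    #include <utility>
    #include <vector>

    struct InitKey {
      unsigned priority;   // from __attribute__((init_priority(N)))
      unsigned lex_order;  // declaration order within the translation unit
      bool operator<(const InitKey &RHS) const {
        if (priority < RHS.priority) return true;
        return priority == RHS.priority && lex_order < RHS.lex_order;
      }
    };

    int main() {
      typedef std::pair<InitKey, const char *> Entry;
      std::vector<Entry> Inits;
      Inits.push_back(Entry(InitKey{65535, 1}, "default priority, second"));
      Inits.push_back(Entry(InitKey{101, 2},   "init_priority(101)"));
      Inits.push_back(Entry(InitKey{65535, 0}, "default priority, first"));

      std::sort(Inits.begin(), Inits.end());  // pair<> compares .first first

      for (unsigned i = 0; i != Inits.size(); ++i)
        std::printf("%u/%u: %s\n", Inits[i].first.priority,
                    Inits[i].first.lex_order, Inits[i].second);
    }
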
@@ -130,6 +149,10 @@ class CodeGenModule : public BlockModule {
/// priorities to be emitted when the translation unit is complete.
CtorList GlobalDtors;
+ /// MangledDeclNames - A map of canonical GlobalDecls to their mangled names.
+ llvm::DenseMap<GlobalDecl, llvm::StringRef> MangledDeclNames;
+ llvm::BumpPtrAllocator MangledNamesAllocator;
+
std::vector<llvm::Constant*> Annotations;
llvm::StringMap<llvm::Constant*> CFConstantStringMap;
@@ -139,10 +162,16 @@ class CodeGenModule : public BlockModule {
/// CXXGlobalInits - Global variables with initializers that need to run
/// before main.
std::vector<llvm::Constant*> CXXGlobalInits;
+
+  /// PrioritizedCXXGlobalInits - Global variables with initializers whose
+  /// order of initialization is set by the init_priority attribute.
+
+ llvm::SmallVector<std::pair<OrderGlobalInits, llvm::Function*>, 8>
+ PrioritizedCXXGlobalInits;
/// CXXGlobalDtors - Global destructor functions and arguments that need to
/// run on termination.
- std::vector<std::pair<llvm::Constant*,llvm::Constant*> > CXXGlobalDtors;
+ std::vector<std::pair<llvm::WeakVH,llvm::Constant*> > CXXGlobalDtors;
/// CFConstantStringClassRef - Cached reference to the class for constant
/// strings. This value has type int * but is actually an Obj-C class pointer.
@@ -315,6 +344,10 @@ public:
llvm::GlobalValue *GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
CXXDtorType Type);
+ // GetCXXMemberFunctionPointerValue - Given a method declaration, return the
+ // integer used in a member function pointer to refer to that value.
+ llvm::Constant *GetCXXMemberFunctionPointerValue(const CXXMethodDecl *MD);
+
/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Value *getBuiltinLibFunction(const FunctionDecl *FD,
@@ -346,7 +379,9 @@ public:
/// AddCXXDtorEntry - Add a destructor and object to add to the C++ global
/// destructor function.
- void AddCXXDtorEntry(llvm::Constant *DtorFn, llvm::Constant *Object);
+ void AddCXXDtorEntry(llvm::Constant *DtorFn, llvm::Constant *Object) {
+ CXXGlobalDtors.push_back(std::make_pair(DtorFn, Object));
+ }
/// CreateRuntimeFunction - Create a new runtime function with the specified
/// type and name.
@@ -409,9 +444,13 @@ public:
  /// which only apply to a function definition.
void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F);
- /// ReturnTypeUsesSret - Return true iff the given type uses 'sret' when used
+ /// ReturnTypeUsesSRet - Return true iff the given type uses 'sret' when used
/// as a return type.
- bool ReturnTypeUsesSret(const CGFunctionInfo &FI);
+ bool ReturnTypeUsesSRet(const CGFunctionInfo &FI);
+
+  /// ReturnTypeUsesFPRet - Return true iff the given type uses 'fpret' when used
+ /// as a return type.
+ bool ReturnTypeUsesFPRet(QualType ResultType);
/// ConstructAttributeList - Get the LLVM attributes and calling convention to
/// use for a particular function type.
@@ -427,15 +466,8 @@ public:
AttributeListType &PAL,
unsigned &CallingConv);
- void getMangledName(MangleBuffer &Buffer, GlobalDecl D);
- void getMangledName(MangleBuffer &Buffer, const NamedDecl *ND);
- void getMangledName(MangleBuffer &Buffer, const BlockDecl *BD);
- void getMangledCXXCtorName(MangleBuffer &Buffer,
- const CXXConstructorDecl *D,
- CXXCtorType Type);
- void getMangledCXXDtorName(MangleBuffer &Buffer,
- const CXXDestructorDecl *D,
- CXXDtorType Type);
+ llvm::StringRef getMangledName(GlobalDecl GD);
+ void getMangledName(GlobalDecl GD, MangleBuffer &Buffer, const BlockDecl *BD);
void EmitTentativeDefinition(const VarDecl *D);
@@ -566,6 +598,8 @@ private:
  /// references to globals which may otherwise be optimized out.
void EmitLLVMUsed(void);
+ void EmitDeclMetadata();
+
/// MayDeferGeneration - Determine if the given decl can be emitted
/// lazily; this is only relevant for definitions. The given decl
/// must be either a function or var decl.
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
index a46dc72..d469b90 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -42,11 +42,13 @@ CodeGenTypes::~CodeGenTypes() {
delete &*I++;
}
-/// ConvertType - Convert the specified type to its LLVM form.
-const llvm::Type *CodeGenTypes::ConvertType(QualType T) {
- llvm::PATypeHolder Result = ConvertTypeRecursive(T);
-
- // Any pointers that were converted defered evaluation of their pointee type,
+/// HandleLateResolvedPointers - For top-level ConvertType calls, this handles
+/// pointers that are referenced but have not been converted yet. This is used
+/// to handle cyclic structures properly.
+void CodeGenTypes::HandleLateResolvedPointers() {
+ assert(!PointersToResolve.empty() && "No pointers to resolve!");
+
+ // Any pointers that were converted deferred evaluation of their pointee type,
// creating an opaque type instead. This is in order to avoid problems with
  // circular types. Loop through all these deferred pointees, if any, and
// resolve them now.
@@ -59,7 +61,21 @@ const llvm::Type *CodeGenTypes::ConvertType(QualType T) {
const llvm::Type *NT = ConvertTypeForMemRecursive(P.first);
P.second->refineAbstractTypeTo(NT);
}
+}
+
+/// ConvertType - Convert the specified type to its LLVM form.
+const llvm::Type *CodeGenTypes::ConvertType(QualType T, bool IsRecursive) {
+ const llvm::Type *Result = ConvertTypeRecursive(T);
+
+ // If this is a top-level call to ConvertType and sub-conversions caused
+ // pointers to get lazily built as opaque types, resolve the pointers, which
+ // might cause Result to be merged away.
+ if (!IsRecursive && !PointersToResolve.empty()) {
+ llvm::PATypeHolder ResultHandle = Result;
+ HandleLateResolvedPointers();
+ Result = ResultHandle;
+ }
return Result;
}
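
The deferred-pointee machinery exists because a type can reach itself through a pointer, and the pre-opaque-pointer LLVM type system has to build such cycles out of temporary opaque types that are refined once the top-level conversion finishes. The classic shape it guards against (illustrative):

    // Converting 'next' eagerly would recurse into Node forever; instead the
    // pointee is left as an opaque type and resolved after the top-level
    // ConvertType call via refineAbstractTypeTo().
    struct Node {
      int value;
      Node *next;
    };
    // Lowers to: %struct.Node = type { i32, %struct.Node* }
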
@@ -80,21 +96,12 @@ const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) {
return ResultType;
}
-const llvm::Type *CodeGenTypes::ConvertTypeForMemRecursive(QualType T) {
- const llvm::Type *ResultType = ConvertTypeRecursive(T);
- if (ResultType->isIntegerTy(1))
- return llvm::IntegerType::get(getLLVMContext(),
- (unsigned)Context.getTypeSize(T));
- // FIXME: Should assert that the llvm type and AST type has the same size.
- return ResultType;
-}
-
/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
-const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
- const llvm::Type *R = ConvertType(T);
+const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool IsRecursive){
+ const llvm::Type *R = ConvertType(T, IsRecursive);
// If this is a non-bool type, don't map it.
if (!R->isIntegerTy(1))
@@ -108,7 +115,7 @@ const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
// Code to verify a given function type is complete, i.e. the return type
// and all of the argument types are complete.
-static const TagType *VerifyFuncTypeComplete(const Type* T) {
+const TagType *CodeGenTypes::VerifyFuncTypeComplete(const Type* T) {
const FunctionType *FT = cast<FunctionType>(T);
if (const TagType* TT = FT->getResultType()->getAs<TagType>())
if (!TT->getDecl()->isDefinition())
@@ -201,7 +208,7 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
case BuiltinType::ObjCSel:
// LLVM void type can only be used as the result of a function call. Just
// map to the same as char.
- return llvm::IntegerType::get(getLLVMContext(), 8);
+ return llvm::Type::getInt8Ty(getLLVMContext());
case BuiltinType::Bool:
// Note that we always return bool as i1 for use as a scalar type.
@@ -233,7 +240,7 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
case BuiltinType::NullPtr: {
// Model std::nullptr_t as i8*
- const llvm::Type *Ty = llvm::IntegerType::get(getLLVMContext(), 8);
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext());
return llvm::PointerType::getUnqual(Ty);
}
@@ -284,7 +291,8 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
assert(A.getIndexTypeCVRQualifiers() == 0 &&
"FIXME: We only handle trivial array types so far!");
// int X[] -> [0 x int]
- return llvm::ArrayType::get(ConvertTypeForMemRecursive(A.getElementType()), 0);
+ return llvm::ArrayType::get(ConvertTypeForMemRecursive(A.getElementType()),
+ 0);
}
case Type::ConstantArray: {
const ConstantArrayType &A = cast<ConstantArrayType>(Ty);
@@ -299,8 +307,12 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
}
case Type::FunctionNoProto:
case Type::FunctionProto: {
- // First, check whether we can build the full function type.
- if (const TagType* TT = VerifyFuncTypeComplete(&Ty)) {
+ // First, check whether we can build the full function type. If the
+ // function type depends on an incomplete type (e.g. a struct or enum), we
+    // cannot lower the function type. Instead, turn it into an opaque type
+ // and have UpdateCompletedType revisit the function type when/if the opaque
+ // argument type is defined.
+ if (const TagType *TT = VerifyFuncTypeComplete(&Ty)) {
// This function's type depends on an incomplete tag type; make sure
// we have an opaque type corresponding to the tag type.
ConvertTagDeclType(TT->getDecl());
@@ -309,17 +321,25 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
FunctionTypes.insert(std::make_pair(&Ty, ResultType));
return ResultType;
}
+
// The function type can be built; call the appropriate routines to
// build it.
- if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty))
- return GetFunctionType(getFunctionInfo(
- CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT,0))),
- FPT->isVariadic());
-
- const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty);
- return GetFunctionType(getFunctionInfo(
- CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT,0))),
- true);
+ const CGFunctionInfo *FI;
+ bool isVariadic;
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty)) {
+ FI = &getFunctionInfo(
+ CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)),
+ true /*Recursive*/);
+ isVariadic = FPT->isVariadic();
+ } else {
+ const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty);
+ FI = &getFunctionInfo(
+ CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)),
+ true /*Recursive*/);
+ isVariadic = true;
+ }
+
+ return GetFunctionType(*FI, isVariadic, true);
}
case Type::ObjCObject:
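
A function type is only lowerable once every tag type it mentions is complete; until then it is represented by an opaque type and revisited by UpdateCompletedType when the tag is defined. In source terms (illustrative):

    struct S;                  // incomplete: VerifyFuncTypeComplete flags it
    typedef void F(struct S);  // F cannot be lowered yet; it maps to an
                               // opaque type for now

    struct S { int x; };       // completing S lets UpdateCompletedType refine
                               // the opaque type into a real function type
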
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
index fc28c3a..c7f48e6 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
@@ -94,6 +94,12 @@ private:
/// is available only for ConvertType(). CovertType() is preferred
/// interface to convert type T into a llvm::Type.
const llvm::Type *ConvertNewType(QualType T);
+
+ /// HandleLateResolvedPointers - For top-level ConvertType calls, this handles
+ /// pointers that are referenced but have not been converted yet. This is
+ /// used to handle cyclic structures properly.
+ void HandleLateResolvedPointers();
+
public:
CodeGenTypes(ASTContext &Ctx, llvm::Module &M, const llvm::TargetData &TD,
const ABIInfo &Info);
@@ -106,22 +112,29 @@ public:
llvm::LLVMContext &getLLVMContext() { return TheModule.getContext(); }
/// ConvertType - Convert type T into a llvm::Type.
- const llvm::Type *ConvertType(QualType T);
+ const llvm::Type *ConvertType(QualType T, bool IsRecursive = false);
const llvm::Type *ConvertTypeRecursive(QualType T);
/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
- const llvm::Type *ConvertTypeForMem(QualType T);
- const llvm::Type *ConvertTypeForMemRecursive(QualType T);
+ const llvm::Type *ConvertTypeForMem(QualType T, bool IsRecursive = false);
+ const llvm::Type *ConvertTypeForMemRecursive(QualType T) {
+ return ConvertTypeForMem(T, true);
+ }
/// GetFunctionType - Get the LLVM function type for \arg Info.
const llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info,
- bool IsVariadic);
+ bool IsVariadic,
+ bool IsRecursive = false);
const llvm::FunctionType *GetFunctionType(GlobalDecl GD);
+ /// VerifyFuncTypeComplete - Utility to check whether a function type can
+ /// be converted to an LLVM type (i.e. doesn't depend on an incomplete tag
+ /// type).
+ static const TagType *VerifyFuncTypeComplete(const Type* T);
/// GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable,
  /// given a CXXMethodDecl. If the method has an incomplete return type,
@@ -150,8 +163,11 @@ public:
return getFunctionInfo(Ty->getResultType(), Args,
Ty->getExtInfo());
}
- const CGFunctionInfo &getFunctionInfo(CanQual<FunctionProtoType> Ty);
- const CGFunctionInfo &getFunctionInfo(CanQual<FunctionNoProtoType> Ty);
+
+ const CGFunctionInfo &getFunctionInfo(CanQual<FunctionProtoType> Ty,
+ bool IsRecursive = false);
+ const CGFunctionInfo &getFunctionInfo(CanQual<FunctionNoProtoType> Ty,
+ bool IsRecursive = false);
// getFunctionInfo - Get the function info for a member function.
const CGFunctionInfo &getFunctionInfo(const CXXRecordDecl *RD,
@@ -172,7 +188,8 @@ public:
/// \param ArgTys - must all actually be canonical as params
const CGFunctionInfo &getFunctionInfo(CanQualType RetTy,
const llvm::SmallVectorImpl<CanQualType> &ArgTys,
- const FunctionType::ExtInfo &Info);
+ const FunctionType::ExtInfo &Info,
+ bool IsRecursive = false);
/// \brief Compute a new LLVM record layout object for the given record.
CGRecordLayout *ComputeRecordLayout(const RecordDecl *D);
@@ -185,7 +202,8 @@ public: // These are internal details of CGT that shouldn't be used externally.
/// GetExpandedTypes - Expand the type \arg Ty into the LLVM
/// argument types it would be passed as on the provided vector \arg
/// ArgTys. See ABIArgInfo::Expand.
- void GetExpandedTypes(QualType Ty, std::vector<const llvm::Type*> &ArgTys);
+ void GetExpandedTypes(QualType Ty, std::vector<const llvm::Type*> &ArgTys,
+ bool IsRecursive);
/// ContainsPointerToDataMember - Return whether the given type contains a
/// pointer to a data member.
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h b/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h
index b8a98d7..26dea40 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h
@@ -36,7 +36,7 @@ class GlobalDecl {
Value.setPointer(D);
}
-
+
public:
GlobalDecl() {}
@@ -50,6 +50,14 @@ public:
GlobalDecl(const CXXDestructorDecl *D, CXXDtorType Type)
: Value(D, Type) {}
+ GlobalDecl getCanonicalDecl() const {
+ GlobalDecl CanonGD;
+ CanonGD.Value.setPointer(Value.getPointer()->getCanonicalDecl());
+ CanonGD.Value.setInt(Value.getInt());
+
+ return CanonGD;
+ }
+
const Decl *getDecl() const { return Value.getPointer(); }
CXXCtorType getCtorType() const {
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/Makefile b/contrib/llvm/tools/clang/lib/CodeGen/Makefile
index 3cea6bb..4b93524 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/Makefile
+++ b/contrib/llvm/tools/clang/lib/CodeGen/Makefile
@@ -12,14 +12,9 @@
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
+CLANG_LEVEL := ../..
LIBRARYNAME := clangCodeGen
BUILD_ARCHIVE = 1
-CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
-ifdef CLANG_VENDOR
-CPP.Flags += -DCLANG_VENDOR='"$(CLANG_VENDOR) "'
-endif
-
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp b/contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp
index 6c2a648..30ee541 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp
@@ -40,7 +40,7 @@ MiscNameMangler::MiscNameMangler(MangleContext &C,
llvm::SmallVectorImpl<char> &Res)
: Context(C), Out(Res) { }
-void MiscNameMangler::mangleBlock(const BlockDecl *BD) {
+void MiscNameMangler::mangleBlock(GlobalDecl GD, const BlockDecl *BD) {
// Mangle the context of the block.
// FIXME: We currently mimic GCC's mangling scheme, which leaves much to be
// desired. Come up with a better mangling scheme.
@@ -55,6 +55,16 @@ void MiscNameMangler::mangleBlock(const BlockDecl *BD) {
const NamedDecl *ND = cast<NamedDecl>(DC);
if (IdentifierInfo *II = ND->getIdentifier())
Out << II->getName();
+ else if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND)) {
+ llvm::SmallString<64> Buffer;
+ Context.mangleCXXDtor(D, GD.getDtorType(), Buffer);
+ Out << Buffer;
+ }
+ else if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND)) {
+ llvm::SmallString<64> Buffer;
+ Context.mangleCXXCtor(D, GD.getCtorType(), Buffer);
+ Out << Buffer;
+ }
else {
// FIXME: We were doing a mangleUnqualifiedName() before, but that's
// a private member of a class that will soon itself be private to the
@@ -125,19 +135,24 @@ class CXXNameMangler {
const CXXMethodDecl *Structor;
unsigned StructorType;
+  /// SeqID - The next substitution sequence number.
+ unsigned SeqID;
+
llvm::DenseMap<uintptr_t, unsigned> Substitutions;
ASTContext &getASTContext() const { return Context.getASTContext(); }
public:
CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res)
- : Context(C), Out(Res), Structor(0), StructorType(0) { }
+ : Context(C), Out(Res), Structor(0), StructorType(0), SeqID(0) { }
CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res,
const CXXConstructorDecl *D, CXXCtorType Type)
- : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type) { }
+ : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type),
+ SeqID(0) { }
CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res,
const CXXDestructorDecl *D, CXXDtorType Type)
- : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type) { }
+ : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type),
+ SeqID(0) { }
#if MANGLE_CHECKER
~CXXNameMangler() {
@@ -154,7 +169,9 @@ public:
void mangle(const NamedDecl *D, llvm::StringRef Prefix = "_Z");
void mangleCallOffset(int64_t NonVirtual, int64_t Virtual);
+ void mangleNumber(const llvm::APSInt &I);
void mangleNumber(int64_t Number);
+ void mangleFloat(const llvm::APFloat &F);
void mangleFunctionEncoding(const FunctionDecl *FD);
void mangleName(const NamedDecl *ND);
void mangleType(QualType T);
@@ -215,6 +232,7 @@ private:
#include "clang/AST/TypeNodes.def"
void mangleType(const TagType*);
+ void mangleType(TemplateName);
void mangleBareFunctionType(const FunctionType *T,
bool MangleReturnType);
@@ -279,7 +297,7 @@ bool MangleContext::shouldMangleDeclName(const NamedDecl *D) {
if (!FD) {
const DeclContext *DC = D->getDeclContext();
// Check for extern variable declared locally.
- if (isa<FunctionDecl>(DC) && D->hasLinkage())
+ if (DC->isFunctionOrMethod() && D->hasLinkage())
while (!DC->isNamespace() && !DC->isTranslationUnit())
DC = DC->getParent();
if (DC->isTranslationUnit() && D->getLinkage() != InternalLinkage)
@@ -357,12 +375,6 @@ void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
mangleBareFunctionType(FT, MangleReturnType);
}
-/// isStd - Return whether a given namespace is the 'std' namespace.
-static bool isStd(const NamespaceDecl *NS) {
- const IdentifierInfo *II = NS->getOriginalNamespace()->getIdentifier();
- return II && II->isStr("std");
-}
-
static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) {
while (isa<LinkageSpecDecl>(DC)) {
DC = DC->getParent();
@@ -371,15 +383,21 @@ static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) {
return DC;
}
+/// isStd - Return whether a given namespace is the 'std' namespace.
+static bool isStd(const NamespaceDecl *NS) {
+ if (!IgnoreLinkageSpecDecls(NS->getParent())->isTranslationUnit())
+ return false;
+
+ const IdentifierInfo *II = NS->getOriginalNamespace()->getIdentifier();
+ return II && II->isStr("std");
+}
+
// isStdNamespace - Return whether a given decl context is a toplevel 'std'
// namespace.
static bool isStdNamespace(const DeclContext *DC) {
if (!DC->isNamespace())
return false;
- if (!IgnoreLinkageSpecDecls(DC->getParent())->isTranslationUnit())
- return false;
-
return isStd(cast<NamespaceDecl>(DC));
}
@@ -511,6 +529,21 @@ void CXXNameMangler::mangleUnscopedTemplateName(TemplateName Template) {
addSubstitution(Template);
}
+void CXXNameMangler::mangleFloat(const llvm::APFloat &F) {
+ // TODO: avoid this copy with careful stream management.
+ llvm::SmallString<20> Buffer;
+ F.bitcastToAPInt().toString(Buffer, 16, false);
+ Out.write(Buffer.data(), Buffer.size());
+}
+
+void CXXNameMangler::mangleNumber(const llvm::APSInt &Value) {
+ if (Value.isSigned() && Value.isNegative()) {
+ Out << 'n';
+ Value.abs().print(Out, true);
+ } else
+ Value.print(Out, Value.isSigned());
+}
+
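
Both overloads implement the <number> production, where a negative value is spelled 'n' followed by its absolute value. A negative integral template argument therefore mangles like this (illustrative, following the Itanium <expr-primary> rules):

    template <int N> struct A {};
    void f(A<-42>) {}   // mangles as _Z1f1AILin42EE: 'L' 'i' (int) "n42" 'E'
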
void CXXNameMangler::mangleNumber(int64_t Number) {
// <number> ::= [n] <non-negative decimal integer>
if (Number < 0) {
@@ -593,6 +626,28 @@ void CXXNameMangler::mangleUnresolvedName(NestedNameSpecifier *Qualifier,
mangleUnqualifiedName(0, Name, KnownArity);
}
+static const FieldDecl *FindFirstNamedDataMember(const RecordDecl *RD) {
+ assert(RD->isAnonymousStructOrUnion() &&
+ "Expected anonymous struct or union!");
+
+ for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I) {
+ const FieldDecl *FD = *I;
+
+ if (FD->getIdentifier())
+ return FD;
+
+ if (const RecordType *RT = FD->getType()->getAs<RecordType>()) {
+ if (const FieldDecl *NamedDataMember =
+ FindFirstNamedDataMember(RT->getDecl()))
+ return NamedDataMember;
+ }
+ }
+
+ // We didn't find a named data member.
+ return 0;
+}
+
void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
DeclarationName Name,
unsigned KnownArity) {
@@ -625,6 +680,28 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
}
}
+ if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
+ // We must have an anonymous union or struct declaration.
+ const RecordDecl *RD =
+ cast<RecordDecl>(VD->getType()->getAs<RecordType>()->getDecl());
+
+ // Itanium C++ ABI 5.1.2:
+ //
+ // For the purposes of mangling, the name of an anonymous union is
+ // considered to be the name of the first named data member found by a
+ // pre-order, depth-first, declaration-order walk of the data members of
+ // the anonymous union. If there is no such data member (i.e., if all of
+ // the data members in the union are unnamed), then there is no way for
+ // a program to refer to the anonymous union, and there is therefore no
+ // need to mangle its name.
+ const FieldDecl *FD = FindFirstNamedDataMember(RD);
+ assert(FD && "Didn't find a named data member!");
+ assert(FD->getIdentifier() && "Data member name isn't an identifier!");
+
+ mangleSourceName(FD->getIdentifier());
+ break;
+ }
+
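
In source terms, the rule quoted above means the variable created for an anonymous union borrows the name of its first named field, found by a pre-order depth-first walk. A sketch (Itanium ABI behavior; the names are illustrative):

    namespace N {
      static union {   // unnamed, so the ABI borrows a member's name
        int aa;        // first named data member: supplies the name
        double bb;
      };
    }
    // The union object's <unqualified-name> is taken from 'aa' (i.e. "2aa"),
    // so N::aa and N::bb both refer into one variable named after 'aa'.
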
// We must have an anonymous struct.
const TagDecl *TD = cast<TagDecl>(ND);
if (const TypedefDecl *D = TD->getTypedefForAnonDecl()) {
@@ -808,7 +885,7 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
if (const BlockDecl *Block = dyn_cast<BlockDecl>(DC)) {
manglePrefix(DC->getParent(), NoFunction);
llvm::SmallString<64> Name;
- Context.mangleBlock(Block, Name);
+ Context.mangleBlock(GlobalDecl(), Block, Name);
Out << Name.size() << Name;
return;
}
@@ -880,6 +957,53 @@ void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND) {
addSubstitution(ND);
}
+/// Mangles a template name under the production <type>. Required for
+/// template template arguments.
+/// <type> ::= <class-enum-type>
+/// ::= <template-param>
+/// ::= <substitution>
+void CXXNameMangler::mangleType(TemplateName TN) {
+ if (mangleSubstitution(TN))
+ return;
+
+ TemplateDecl *TD = 0;
+
+ switch (TN.getKind()) {
+ case TemplateName::QualifiedTemplate:
+ TD = TN.getAsQualifiedTemplateName()->getTemplateDecl();
+ goto HaveDecl;
+
+ case TemplateName::Template:
+ TD = TN.getAsTemplateDecl();
+ goto HaveDecl;
+
+ HaveDecl:
+ if (isa<TemplateTemplateParmDecl>(TD))
+ mangleTemplateParameter(cast<TemplateTemplateParmDecl>(TD)->getIndex());
+ else
+ mangleName(TD);
+ break;
+
+ case TemplateName::OverloadedTemplate:
+ llvm_unreachable("can't mangle an overloaded template name as a <type>");
+ break;
+
+ case TemplateName::DependentTemplate: {
+ const DependentTemplateName *Dependent = TN.getAsDependentTemplateName();
+ assert(Dependent->isIdentifier());
+
+ // <class-enum-type> ::= <name>
+ // <name> ::= <nested-name>
+ mangleUnresolvedScope(Dependent->getQualifier());
+ mangleSourceName(Dependent->getIdentifier());
+ break;
+ }
+
+ }
+
+ addSubstitution(TN);
+}
+
void
CXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity) {
switch (OO) {
@@ -1001,6 +1125,18 @@ void CXXNameMangler::mangleQualifiers(Qualifiers Quals) {
if (Quals.hasConst())
Out << 'K';
+ if (Quals.hasAddressSpace()) {
+ // Extension:
+ //
+ // <type> ::= U <address-space-number>
+ //
+ // where <address-space-number> is a source name consisting of 'AS'
+ // followed by the address space <number>.
+ llvm::SmallString<64> ASString;
+ ASString = "AS" + llvm::utostr_32(Quals.getAddressSpace());
+ Out << 'U' << ASString.size() << ASString;
+ }
+
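
Since the <source-name> here is "AS" followed by the address-space number, address space 1 is spelled as the three-character name "AS1" and mangles as U3AS1. For example (GNU attribute spelling; a sketch of the extension described above):

    // Mangles as _Z1fPU3AS1i: P (pointer), U3AS1 (qualifier "AS1"), i (int).
    void f(__attribute__((address_space(1))) int *p) {}
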
// FIXME: For now, just drop all extension qualifiers on the floor.
}
@@ -1138,7 +1274,8 @@ void CXXNameMangler::mangleBareFunctionType(const FunctionType *T,
if (MangleReturnType)
mangleType(Proto->getResultType());
- if (Proto->getNumArgs() == 0) {
+ if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) {
+ // <builtin-type> ::= v # void
Out << 'v';
return;
}
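
The added isVariadic check makes 'v' mean exactly "no parameters at all": a function whose parameter list is only an ellipsis must mangle it as 'z' instead. For example (illustrative):

    void f();      // mangles as _Z1fv: empty parameter list is 'v'
    void g(...);   // mangles as _Z1gz: the ellipsis is 'z', not 'v'
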
@@ -1204,6 +1341,22 @@ void CXXNameMangler::mangleType(const MemberPointerType *T) {
if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
mangleQualifiers(Qualifiers::fromCVRMask(FPT->getTypeQuals()));
mangleType(FPT);
+
+ // Itanium C++ ABI 5.1.8:
+ //
+ // The type of a non-static member function is considered to be different,
+ // for the purposes of substitution, from the type of a namespace-scope or
+ // static member function whose type appears similar. The types of two
+ // non-static member functions are considered to be different, for the
+ // purposes of substitution, if the functions are members of different
+ // classes. In other words, for the purposes of substitution, the class of
+ // which the function is a member is considered part of the type of
+ // function.
+
+ // We increment the SeqID here to emulate adding an entry to the
+ // substitution table. We can't actually add it because we don't want this
+ // particular function type to be substituted.
+ ++SeqID;
} else
mangleType(PointeeType);
}
@@ -1213,8 +1366,6 @@ void CXXNameMangler::mangleType(const TemplateTypeParmType *T) {
mangleTemplateParameter(T->getIndex());
}
-// FIXME: <type> ::= <template-template-param> <template-args>
-
// <type> ::= P <type> # pointer-to
void CXXNameMangler::mangleType(const PointerType *T) {
Out << 'P';
@@ -1244,12 +1395,20 @@ void CXXNameMangler::mangleType(const ComplexType *T) {
}
// GNU extension: vector types
-// <type> ::= <vector-type>
-// <vector-type> ::= Dv <positive dimension number> _ <element type>
-// ::= Dv [<dimension expression>] _ <element type>
+// <type> ::= <vector-type>
+// <vector-type> ::= Dv <positive dimension number> _
+// <extended element type>
+// ::= Dv [<dimension expression>] _ <element type>
+// <extended element type> ::= <element type>
+// ::= p # AltiVec vector pixel
void CXXNameMangler::mangleType(const VectorType *T) {
Out << "Dv" << T->getNumElements() << '_';
- mangleType(T->getElementType());
+ if (T->getAltiVecSpecific() == VectorType::Pixel)
+ Out << 'p';
+ else if (T->getAltiVecSpecific() == VectorType::Bool)
+ Out << 'b';
+ else
+ mangleType(T->getElementType());
}
void CXXNameMangler::mangleType(const ExtVectorType *T) {
mangleType(static_cast<const VectorType*>(T));
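
For an ordinary GNU vector the new grammar reduces to Dv <lanes> _ <element type>, with the AltiVec pixel and bool cases using the extended element codes 'p' and 'b'. For example (illustrative):

    typedef int v4si __attribute__((vector_size(16)));  // 4 x int
    void f(v4si) {}   // mangles as _Z1fDv4_i
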
@@ -1303,23 +1462,25 @@ void CXXNameMangler::mangleType(const TemplateSpecializationType *T) {
void CXXNameMangler::mangleType(const DependentNameType *T) {
// Typename types are always nested
Out << 'N';
- if (T->getIdentifier()) {
- mangleUnresolvedScope(T->getQualifier());
- mangleSourceName(T->getIdentifier());
- } else {
- const TemplateSpecializationType *TST = T->getTemplateId();
- if (!mangleSubstitution(QualType(TST, 0))) {
- mangleTemplatePrefix(TST->getTemplateName());
-
- // FIXME: GCC does not appear to mangle the template arguments when
- // the template in question is a dependent template name. Should we
- // emulate that badness?
- mangleTemplateArgs(TST->getTemplateName(), TST->getArgs(),
- TST->getNumArgs());
- addSubstitution(QualType(TST, 0));
- }
- }
-
+ mangleUnresolvedScope(T->getQualifier());
+ mangleSourceName(T->getIdentifier());
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleType(const DependentTemplateSpecializationType *T) {
+ // Dependently-scoped template types are always nested
+ Out << 'N';
+
+ // TODO: avoid making this TemplateName.
+ TemplateName Prefix =
+ getASTContext().getDependentTemplateName(T->getQualifier(),
+ T->getIdentifier());
+ mangleTemplatePrefix(Prefix);
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(Prefix, T->getArgs(), T->getNumArgs());
Out << 'E';
}
@@ -1369,9 +1530,7 @@ void CXXNameMangler::mangleIntegerLiteral(QualType T,
// Boolean values are encoded as 0/1.
Out << (Value.getBoolValue() ? '1' : '0');
} else {
- if (Value.isNegative())
- Out << 'n';
- Value.abs().print(Out, false);
+ mangleNumber(Value);
}
Out << 'E';
@@ -1435,10 +1594,44 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
#define STMT(Type, Base) \
case Expr::Type##Class:
#include "clang/AST/StmtNodes.inc"
+ // fallthrough
+
+ // These all can only appear in local or variable-initialization
+ // contexts and so should never appear in a mangling.
+ case Expr::AddrLabelExprClass:
+ case Expr::BlockDeclRefExprClass:
+ case Expr::CXXThisExprClass:
+ case Expr::DesignatedInitExprClass:
+ case Expr::ImplicitValueInitExprClass:
+ case Expr::InitListExprClass:
+ case Expr::ParenListExprClass:
+ case Expr::CXXScalarValueInitExprClass:
llvm_unreachable("unexpected statement kind");
break;
- default: {
+ // FIXME: invent manglings for all these.
+ case Expr::BlockExprClass:
+ case Expr::CXXPseudoDestructorExprClass:
+ case Expr::ChooseExprClass:
+ case Expr::CompoundLiteralExprClass:
+ case Expr::ExtVectorElementExprClass:
+ case Expr::ObjCEncodeExprClass:
+ case Expr::ObjCImplicitSetterGetterRefExprClass:
+ case Expr::ObjCIsaExprClass:
+ case Expr::ObjCIvarRefExprClass:
+ case Expr::ObjCMessageExprClass:
+ case Expr::ObjCPropertyRefExprClass:
+ case Expr::ObjCProtocolExprClass:
+ case Expr::ObjCSelectorExprClass:
+ case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCSuperExprClass:
+ case Expr::OffsetOfExprClass:
+ case Expr::PredefinedExprClass:
+ case Expr::ShuffleVectorExprClass:
+ case Expr::StmtExprClass:
+ case Expr::TypesCompatibleExprClass:
+ case Expr::UnaryTypeTraitExprClass:
+ case Expr::VAArgExprClass: {
// As bad as this diagnostic is, it's better than crashing.
Diagnostic &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
@@ -1450,6 +1643,11 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
break;
}
+ case Expr::CXXDefaultArgExprClass:
+ mangleExpression(cast<CXXDefaultArgExpr>(E)->getExpr());
+ break;
+
+ case Expr::CXXMemberCallExprClass: // fallthrough
case Expr::CallExprClass: {
const CallExpr *CE = cast<CallExpr>(E);
Out << "cl";
@@ -1460,6 +1658,26 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
break;
}
+ case Expr::CXXNewExprClass: {
+    // Proposal from David Vandevoorde, 2010.06.30
+ const CXXNewExpr *New = cast<CXXNewExpr>(E);
+ if (New->isGlobalNew()) Out << "gs";
+ Out << (New->isArray() ? "na" : "nw");
+ for (CXXNewExpr::const_arg_iterator I = New->placement_arg_begin(),
+ E = New->placement_arg_end(); I != E; ++I)
+ mangleExpression(*I);
+ Out << '_';
+ mangleType(New->getAllocatedType());
+ if (New->hasInitializer()) {
+ Out << "pi";
+ for (CXXNewExpr::const_arg_iterator I = New->constructor_arg_begin(),
+ E = New->constructor_arg_end(); I != E; ++I)
+ mangleExpression(*I);
+ }
+ Out << 'E';
+ break;
+ }
+
case Expr::MemberExprClass: {
const MemberExpr *ME = cast<MemberExpr>(E);
mangleMemberExpr(ME->getBase(), ME->isArrow(),
@@ -1533,6 +1751,43 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
break;
}
+ case Expr::CXXThrowExprClass: {
+ const CXXThrowExpr *TE = cast<CXXThrowExpr>(E);
+
+    // Proposal from David Vandevoorde, 2010.06.30
+ if (TE->getSubExpr()) {
+ Out << "tw";
+ mangleExpression(TE->getSubExpr());
+ } else {
+ Out << "tr";
+ }
+ break;
+ }
+
+ case Expr::CXXTypeidExprClass: {
+ const CXXTypeidExpr *TIE = cast<CXXTypeidExpr>(E);
+
+    // Proposal from David Vandevoorde, 2010.06.30
+ if (TIE->isTypeOperand()) {
+ Out << "ti";
+ mangleType(TIE->getTypeOperand());
+ } else {
+ Out << "te";
+ mangleExpression(TIE->getExprOperand());
+ }
+ break;
+ }
+
+ case Expr::CXXDeleteExprClass: {
+ const CXXDeleteExpr *DE = cast<CXXDeleteExpr>(E);
+
+    // Proposal from David Vandevoorde, 2010.06.30
+ if (DE->isGlobalDelete()) Out << "gs";
+ Out << (DE->isArrayForm() ? "da" : "dl");
+ mangleExpression(DE->getArgument());
+ break;
+ }
+
case Expr::UnaryOperatorClass: {
const UnaryOperator *UO = cast<UnaryOperator>(E);
mangleOperatorName(UnaryOperator::getOverloadedOperator(UO->getOpcode()),
@@ -1541,6 +1796,18 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
break;
}
+ case Expr::ArraySubscriptExprClass: {
+ const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(E);
+
+    // Array subscript is treated as a syntactically weird form of
+ // binary operator.
+ Out << "ix";
+ mangleExpression(AE->getLHS());
+ mangleExpression(AE->getRHS());
+ break;
+ }
+
+ case Expr::CompoundAssignOperatorClass: // fallthrough
case Expr::BinaryOperatorClass: {
const BinaryOperator *BO = cast<BinaryOperator>(E);
mangleOperatorName(BinaryOperator::getOverloadedOperator(BO->getOpcode()),
@@ -1657,12 +1924,7 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
const FloatingLiteral *FL = cast<FloatingLiteral>(E);
Out << 'L';
mangleType(FL->getType());
-
- // TODO: avoid this copy with careful stream management.
- llvm::SmallString<20> Buffer;
- FL->getValue().bitcastToAPInt().toString(Buffer, 16, false);
- Out.write(Buffer.data(), Buffer.size());
-
+ mangleFloat(FL->getValue());
Out << 'E';
break;
}
@@ -1680,16 +1942,62 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
Out << 'E';
break;
- case Expr::IntegerLiteralClass:
- mangleIntegerLiteral(E->getType(),
- llvm::APSInt(cast<IntegerLiteral>(E)->getValue()));
+ case Expr::IntegerLiteralClass: {
+ llvm::APSInt Value(cast<IntegerLiteral>(E)->getValue());
+ if (E->getType()->isSignedIntegerType())
+ Value.setIsSigned(true);
+ mangleIntegerLiteral(E->getType(), Value);
break;
+ }
+ case Expr::ImaginaryLiteralClass: {
+ const ImaginaryLiteral *IE = cast<ImaginaryLiteral>(E);
+ // Mangle as if a complex literal.
+    // Proposal from David Vandevoorde, 2010.06.30.
+ Out << 'L';
+ mangleType(E->getType());
+ if (const FloatingLiteral *Imag =
+ dyn_cast<FloatingLiteral>(IE->getSubExpr())) {
+ // Mangle a floating-point zero of the appropriate type.
+ mangleFloat(llvm::APFloat(Imag->getValue().getSemantics()));
+ Out << '_';
+ mangleFloat(Imag->getValue());
+ } else {
+ Out << '0' << '_';
+ llvm::APSInt Value(cast<IntegerLiteral>(IE->getSubExpr())->getValue());
+ if (IE->getSubExpr()->getType()->isSignedIntegerType())
+ Value.setIsSigned(true);
+ mangleNumber(Value);
+ }
+ Out << 'E';
+ break;
}
-}
-// FIXME: <type> ::= G <type> # imaginary (C 2000)
-// FIXME: <type> ::= U <source-name> <type> # vendor extended type qualifier
+ case Expr::StringLiteralClass: {
+    // Proposal from David Vandevoorde, 2010.06.30.
+ // I've sent a comment off asking whether this needs to also
+ // represent the length of the string.
+ Out << 'L';
+ const ConstantArrayType *T = cast<ConstantArrayType>(E->getType());
+ QualType CharTy = T->getElementType().getUnqualifiedType();
+ mangleType(CharTy);
+ Out << 'E';
+ break;
+ }
+
+ case Expr::GNUNullExprClass:
+ // FIXME: should this really be mangled the same as nullptr?
+ // fallthrough
+
+ case Expr::CXXNullPtrLiteralExprClass: {
+    // Proposal from David Vandevoorde, 2010.06.30, as
+ // modified by ABI list discussion.
+ Out << "LDnE";
+ break;
+ }
+
+ }
+}
void CXXNameMangler::mangleCXXCtorType(CXXCtorType T) {
// <ctor-dtor-name> ::= C1 # complete object constructor
@@ -1774,9 +2082,8 @@ void CXXNameMangler::mangleTemplateArg(const NamedDecl *P,
mangleType(A.getAsType());
break;
case TemplateArgument::Template:
- assert(A.getAsTemplate().getAsTemplateDecl() &&
- "Can't get dependent template names here");
- mangleName(A.getAsTemplate().getAsTemplateDecl());
+ // This is mangled as <type>.
+ mangleType(A.getAsTemplate());
break;
case TemplateArgument::Expression:
Out << 'X';
@@ -1882,7 +2189,7 @@ bool CXXNameMangler::mangleSubstitution(uintptr_t Ptr) {
while (SeqID) {
assert(BufferPtr > Buffer && "Buffer overflow!");
- unsigned char c = static_cast<unsigned char>(SeqID) % 36;
+ char c = static_cast<char>(SeqID % 36);
*--BufferPtr = (c < 10 ? '0' + c : 'A' + c - 10);
SeqID /= 36;
@@ -2049,10 +2356,8 @@ void CXXNameMangler::addSubstitution(TemplateName Template) {
}
void CXXNameMangler::addSubstitution(uintptr_t Ptr) {
- unsigned SeqID = Substitutions.size();
-
assert(!Substitutions.count(Ptr) && "Substitution already exists!");
- Substitutions[Ptr] = SeqID;
+ Substitutions[Ptr] = SeqID++;
}
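The sequence IDs assigned here become S_-style substitutions: the first
repeated entity is S_, then S0_ through S9_, SA_, and so on in base 36.
A small sketch, assuming a standard Itanium-ABI compiler:

    struct Box {};
    void g(Box*, Box*) {}   // roughly: _Z1gP3BoxS0_
                            // '3Box' becomes S_, 'P3Box' becomes S0_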
//
@@ -2092,10 +2397,10 @@ void MangleContext::mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
Mangler.mangle(D);
}
-void MangleContext::mangleBlock(const BlockDecl *BD,
+void MangleContext::mangleBlock(GlobalDecl GD, const BlockDecl *BD,
llvm::SmallVectorImpl<char> &Res) {
MiscNameMangler Mangler(*this, Res);
- Mangler.mangleBlock(BD);
+ Mangler.mangleBlock(GD, BD);
}
void MangleContext::mangleThunk(const CXXMethodDecl *MD,
@@ -2155,6 +2460,15 @@ void MangleContext::mangleGuardVariable(const VarDecl *D,
Mangler.mangleName(D);
}
+void MangleContext::mangleReferenceTemporary(const VarDecl *D,
+ llvm::SmallVectorImpl<char> &Res) {
+ // We match the GCC mangling here.
+ // <special-name> ::= GR <object name>
+ CXXNameMangler Mangler(*this, Res);
+ Mangler.getStream() << "_ZGR";
+ Mangler.mangleName(D);
+}
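For orientation, the GR special name as GCC emits it (a sketch; later
compiler versions may append a discriminator):

    const int &ref = 42;    // backing temporary, roughly: _ZGR3ref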
+
void MangleContext::mangleCXXVTable(const CXXRecordDecl *RD,
llvm::SmallVectorImpl<char> &Res) {
// <special-name> ::= TV <type> # virtual table
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/Mangle.h b/contrib/llvm/tools/clang/lib/CodeGen/Mangle.h
index f1c5358..139f6c0 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/Mangle.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/Mangle.h
@@ -19,6 +19,7 @@
#define LLVM_CLANG_CODEGEN_MANGLE_H
#include "CGCXX.h"
+#include "GlobalDecl.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
@@ -84,6 +85,8 @@ public:
Diagnostic &Diags)
: Context(Context), Diags(Diags) { }
+ virtual ~MangleContext() { }
+
ASTContext &getASTContext() const { return Context; }
Diagnostic &getDiags() const { return Diags; }
@@ -108,7 +111,7 @@ public:
/// @name Mangler Entry Points
/// @{
- bool shouldMangleDeclName(const NamedDecl *D);
+ virtual bool shouldMangleDeclName(const NamedDecl *D);
virtual void mangleName(const NamedDecl *D, llvm::SmallVectorImpl<char> &);
virtual void mangleThunk(const CXXMethodDecl *MD,
const ThunkInfo &Thunk,
@@ -118,6 +121,8 @@ public:
llvm::SmallVectorImpl<char> &);
virtual void mangleGuardVariable(const VarDecl *D,
llvm::SmallVectorImpl<char> &);
+ virtual void mangleReferenceTemporary(const VarDecl *D,
+ llvm::SmallVectorImpl<char> &);
virtual void mangleCXXVTable(const CXXRecordDecl *RD,
llvm::SmallVectorImpl<char> &);
virtual void mangleCXXVTT(const CXXRecordDecl *RD,
@@ -131,7 +136,8 @@ public:
llvm::SmallVectorImpl<char> &);
virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
llvm::SmallVectorImpl<char> &);
- void mangleBlock(const BlockDecl *BD, llvm::SmallVectorImpl<char> &);
+ void mangleBlock(GlobalDecl GD,
+ const BlockDecl *BD, llvm::SmallVectorImpl<char> &);
void mangleInitDiscriminator() {
Discriminator = 0;
@@ -161,7 +167,7 @@ public:
llvm::raw_svector_ostream &getStream() { return Out; }
- void mangleBlock(const BlockDecl *BD);
+ void mangleBlock(GlobalDecl GD, const BlockDecl *BD);
void mangleObjCMethodName(const ObjCMethodDecl *MD);
};
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
new file mode 100644
index 0000000..da0fdb6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -0,0 +1,1191 @@
+//===--- MicrosoftCXXABI.cpp - Emit LLVM Code from ASTs for a Module ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ code generation targeting the Microsoft Visual C++ ABI.
+// The class in this file generates structures that follow the Microsoft
+// Visual C++ ABI, which is actually not very well documented at all outside
+// of Microsoft.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCXXABI.h"
+#include "CodeGenModule.h"
+#include "Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "CGVTables.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+
+/// MicrosoftCXXNameMangler - Manage the mangling of a single name for the
+/// Microsoft Visual C++ ABI.
+class MicrosoftCXXNameMangler {
+ MangleContext &Context;
+ llvm::raw_svector_ostream Out;
+
+ ASTContext &getASTContext() const { return Context.getASTContext(); }
+
+public:
+ MicrosoftCXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res)
+ : Context(C), Out(Res) { }
+
+ llvm::raw_svector_ostream &getStream() { return Out; }
+
+ void mangle(const NamedDecl *D, llvm::StringRef Prefix = "?");
+ void mangleName(const NamedDecl *ND);
+ void mangleFunctionEncoding(const FunctionDecl *FD);
+ void mangleVariableEncoding(const VarDecl *VD);
+ void mangleNumber(int64_t Number);
+ void mangleType(QualType T);
+
+private:
+ void mangleUnqualifiedName(const NamedDecl *ND) {
+ mangleUnqualifiedName(ND, ND->getDeclName());
+ }
+ void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name);
+ void mangleSourceName(const IdentifierInfo *II);
+ void manglePostfix(const DeclContext *DC, bool NoFunction=false);
+ void mangleOperatorName(OverloadedOperatorKind OO);
+ void mangleQualifiers(Qualifiers Quals, bool IsMember);
+
+ void mangleObjCMethodName(const ObjCMethodDecl *MD);
+
+ // Declare manglers for every type class.
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T);
+#include "clang/AST/TypeNodes.def"
+
+ void mangleType(const TagType*);
+ void mangleType(const FunctionType *T, const FunctionDecl *D,
+ bool IsStructor, bool IsInstMethod);
+ void mangleType(const ArrayType *T, bool IsGlobal);
+ void mangleExtraDimensions(QualType T);
+ void mangleFunctionClass(const FunctionDecl *FD);
+ void mangleCallingConvention(const FunctionType *T);
+ void mangleThrowSpecification(const FunctionProtoType *T);
+
+};
+
+/// MicrosoftMangleContext - Overrides the default MangleContext for the
+/// Microsoft Visual C++ ABI.
+class MicrosoftMangleContext : public MangleContext {
+public:
+ MicrosoftMangleContext(ASTContext &Context,
+ Diagnostic &Diags) : MangleContext(Context, Diags) { }
+ virtual bool shouldMangleDeclName(const NamedDecl *D);
+ virtual void mangleName(const NamedDecl *D, llvm::SmallVectorImpl<char> &);
+ virtual void mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleGuardVariable(const VarDecl *D,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXVTable(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXVTT(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
+ const CXXRecordDecl *Type,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXRTTI(QualType T, llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXRTTIName(QualType T, llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ llvm::SmallVectorImpl<char> &);
+};
+
+class MicrosoftCXXABI : public CXXABI {
+ MicrosoftMangleContext MangleCtx;
+public:
+ MicrosoftCXXABI(CodeGenModule &CGM)
+ : MangleCtx(CGM.getContext(), CGM.getDiags()) {}
+
+ MicrosoftMangleContext &getMangleContext() {
+ return MangleCtx;
+ }
+};
+
+}
+
+static bool isInCLinkageSpecification(const Decl *D) {
+ D = D->getCanonicalDecl();
+ for (const DeclContext *DC = D->getDeclContext();
+ !DC->isTranslationUnit(); DC = DC->getParent()) {
+ if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC))
+ return Linkage->getLanguage() == LinkageSpecDecl::lang_c;
+ }
+
+ return false;
+}
+
+bool MicrosoftMangleContext::shouldMangleDeclName(const NamedDecl *D) {
+ // In C, functions with no attributes never need to be mangled. Fastpath them.
+ if (!getASTContext().getLangOptions().CPlusPlus && !D->hasAttrs())
+ return false;
+
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (D->hasAttr<AsmLabelAttr>())
+ return true;
+
+  // Clang's "overloadable" attribute extension to C/C++ implies name mangling
+  // (always), as does a C++ member function or a function whose name is not
+  // a simple identifier.
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (FD && (FD->hasAttr<OverloadableAttr>() || isa<CXXMethodDecl>(FD) ||
+ !FD->getDeclName().isIdentifier()))
+ return true;
+
+ // Otherwise, no mangling is done outside C++ mode.
+ if (!getASTContext().getLangOptions().CPlusPlus)
+ return false;
+
+ // Variables at global scope with internal linkage are not mangled.
+ if (!FD) {
+ const DeclContext *DC = D->getDeclContext();
+ if (DC->isTranslationUnit() && D->getLinkage() == InternalLinkage)
+ return false;
+ }
+
+ // C functions and "main" are not mangled.
+ if ((FD && FD->isMain()) || isInCLinkageSpecification(D))
+ return false;
+
+ return true;
+}
+
+void MicrosoftCXXNameMangler::mangle(const NamedDecl *D,
+ llvm::StringRef Prefix) {
+ // MSVC doesn't mangle C++ names the same way it mangles extern "C" names.
+ // Therefore it's really important that we don't decorate the
+  // name with leading underscores or leading/trailing at signs. So, emit an
+ // asm marker at the start so we get the name right.
+ Out << '\01'; // LLVM IR Marker for __asm("foo")
+
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
+ // If we have an asm name, then we use it as the mangling.
+ Out << ALA->getLabel();
+ return;
+ }
+
+ // <mangled-name> ::= ? <name> <type-encoding>
+ Out << Prefix;
+ mangleName(D);
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ mangleFunctionEncoding(FD);
+ else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ mangleVariableEncoding(VD);
+ // TODO: Fields? Can MSVC even mangle them?
+}
+
+void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
+ // <type-encoding> ::= <function-class> <function-type>
+
+ // Don't mangle in the type if this isn't a decl we should typically mangle.
+ if (!Context.shouldMangleDeclName(FD))
+ return;
+
+ // We should never ever see a FunctionNoProtoType at this point.
+ // We don't even know how to mangle their types anyway :).
+ const FunctionProtoType *FT = cast<FunctionProtoType>(FD->getType());
+
+ bool InStructor = false, InInstMethod = false;
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
+ if (MD) {
+ if (MD->isInstance())
+ InInstMethod = true;
+ if (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD))
+ InStructor = true;
+ }
+
+ // First, the function class.
+ mangleFunctionClass(FD);
+
+ mangleType(FT, FD, InStructor, InInstMethod);
+}
+
+void MicrosoftCXXNameMangler::mangleVariableEncoding(const VarDecl *VD) {
+ // <type-encoding> ::= <storage-class> <variable-type>
+ // <storage-class> ::= 0 # private static member
+ // ::= 1 # protected static member
+ // ::= 2 # public static member
+ // ::= 3 # global
+ // ::= 4 # static local
+
+ // The first character in the encoding (after the name) is the storage class.
+ if (VD->isStaticDataMember()) {
+ // If it's a static member, it also encodes the access level.
+ switch (VD->getAccess()) {
+ default:
+ case AS_private: Out << '0'; break;
+ case AS_protected: Out << '1'; break;
+ case AS_public: Out << '2'; break;
+ }
+ }
+ else if (!VD->isStaticLocal())
+ Out << '3';
+ else
+ Out << '4';
+ // Now mangle the type.
+ // <variable-type> ::= <type> <cvr-qualifiers>
+ // ::= <type> A # pointers, references, arrays
+ // Pointers and references are odd. The type of 'int * const foo;' gets
+ // mangled as 'QAHA' instead of 'PAHB', for example.
+ QualType Ty = VD->getType();
+ if (Ty->isPointerType() || Ty->isReferenceType()) {
+ mangleType(Ty);
+ Out << 'A';
+ } else if (Ty->isArrayType()) {
+ // Global arrays are funny, too.
+ mangleType(static_cast<ArrayType *>(Ty.getTypePtr()), true);
+ Out << 'A';
+ } else {
+ mangleType(Ty.getLocalUnqualifiedType());
+ mangleQualifiers(Ty.getLocalQualifiers(), false);
+ }
+}
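A couple of worked decorations for these storage classes, assuming standard
MSVC behavior (not verified against this implementation):

    int x;                       // ?x@@3HA   '3' global, 'H' int, 'A' unqualified
    struct S { static int m; };  // int S::m decorates as ?m@S@@2HA
                                 // '2' = public static member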
+
+void MicrosoftCXXNameMangler::mangleName(const NamedDecl *ND) {
+ // <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @
+ const DeclContext *DC = ND->getDeclContext();
+
+ // Always start with the unqualified name.
+ mangleUnqualifiedName(ND);
+
+ // If this is an extern variable declared locally, the relevant DeclContext
+ // is that of the containing namespace, or the translation unit.
+ if (isa<FunctionDecl>(DC) && ND->hasLinkage())
+ while (!DC->isNamespace() && !DC->isTranslationUnit())
+ DC = DC->getParent();
+
+ manglePostfix(DC);
+
+ // Terminate the whole name with an '@'.
+ Out << '@';
+}
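The name part therefore reads inside-out and ends with a second '@'.
A sketch, assuming standard MSVC decoration:

    namespace ns { void f() {} } // ?f@ns@@YAXXZ -- the <name> is "f@ns@@"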
+
+void MicrosoftCXXNameMangler::mangleNumber(int64_t Number) {
+ // <number> ::= [?] <decimal digit> # <= 9
+ // ::= [?] <hex digit>+ @ # > 9; A = 0, B = 1, etc...
+ if (Number < 0) {
+ Out << '?';
+ Number = -Number;
+ }
+ if (Number >= 1 && Number <= 10) {
+ Out << Number-1;
+ } else {
+ // We have to build up the encoding in reverse order, so it will come
+ // out right when we write it out.
+ char Encoding[16];
+ char *EndPtr = Encoding+sizeof(Encoding);
+ char *CurPtr = EndPtr;
+ while (Number) {
+ *--CurPtr = 'A' + (Number % 16);
+ Number /= 16;
+ }
+ Out.write(CurPtr, EndPtr-CurPtr);
+ Out << '@';
+ }
+}
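The same encoding as a self-contained sketch, handy for checking values by
hand (it mirrors the function above; the results are not MSVC-verified):

    #include <string>
    std::string msvcNumber(long long N) {
      std::string S;
      if (N < 0) { S += '?'; N = -N; }       // '?' marks negative values
      if (N >= 1 && N <= 10) return S + char('0' + N - 1);
      std::string Hex;                       // base 16, digit d written 'A'+d
      for (; N; N /= 16) Hex.insert(Hex.begin(), char('A' + N % 16));
      return S + Hex + "@";                  // 3 -> "2", 11 -> "L@", -2 -> "?1"
    }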
+
+void
+MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
+ DeclarationName Name) {
+ // <unqualified-name> ::= <operator-name>
+ // ::= <ctor-dtor-name>
+ // ::= <source-name>
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier: {
+ if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
+ mangleSourceName(II);
+ break;
+ }
+
+ // Otherwise, an anonymous entity. We must have a declaration.
+ assert(ND && "mangling empty name without declaration");
+
+ if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
+ if (NS->isAnonymousNamespace()) {
+ Out << "?A";
+ break;
+ }
+ }
+
+ // We must have an anonymous struct.
+ const TagDecl *TD = cast<TagDecl>(ND);
+ if (const TypedefDecl *D = TD->getTypedefForAnonDecl()) {
+ assert(TD->getDeclContext() == D->getDeclContext() &&
+ "Typedef should not be in another decl context!");
+ assert(D->getDeclName().getAsIdentifierInfo() &&
+ "Typedef was not named!");
+ mangleSourceName(D->getDeclName().getAsIdentifierInfo());
+ break;
+ }
+
+ // When VC encounters an anonymous type with no tag and no typedef,
+ // it literally emits '<unnamed-tag>'.
+ Out << "<unnamed-tag>";
+ break;
+ }
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ assert(false && "Can't mangle Objective-C selector names here!");
+ break;
+
+ case DeclarationName::CXXConstructorName:
+ assert(false && "Can't mangle constructors yet!");
+ break;
+
+ case DeclarationName::CXXDestructorName:
+ assert(false && "Can't mangle destructors yet!");
+ break;
+
+ case DeclarationName::CXXConversionFunctionName:
+ // <operator-name> ::= ?B # (cast)
+ // The target type is encoded as the return type.
+ Out << "?B";
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ mangleOperatorName(Name.getCXXOverloadedOperator());
+ break;
+
+ case DeclarationName::CXXLiteralOperatorName:
+ // FIXME: Was this added in VS2010? Does MS even know how to mangle this?
+ assert(false && "Don't know how to mangle literal operators yet!");
+ break;
+
+ case DeclarationName::CXXUsingDirective:
+ assert(false && "Can't mangle a using directive name!");
+ break;
+ }
+}
+
+void MicrosoftCXXNameMangler::manglePostfix(const DeclContext *DC,
+ bool NoFunction) {
+ // <postfix> ::= <unqualified-name> [<postfix>]
+ // ::= <template-postfix> <template-args> [<postfix>]
+ // ::= <template-param>
+ // ::= <substitution> [<postfix>]
+
+ if (!DC) return;
+
+ while (isa<LinkageSpecDecl>(DC))
+ DC = DC->getParent();
+
+ if (DC->isTranslationUnit())
+ return;
+
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) {
+ llvm::SmallString<64> Name;
+ Context.mangleBlock(GlobalDecl(), BD, Name);
+ Out << Name << '@';
+ return manglePostfix(DC->getParent(), NoFunction);
+ }
+
+ if (NoFunction && (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)))
+ return;
+ else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC))
+ mangleObjCMethodName(Method);
+ else {
+ mangleUnqualifiedName(cast<NamedDecl>(DC));
+ manglePostfix(DC->getParent(), NoFunction);
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO) {
+ switch (OO) {
+ // ?0 # constructor
+ // ?1 # destructor
+ // <operator-name> ::= ?2 # new
+ case OO_New: Out << "?2"; break;
+ // <operator-name> ::= ?3 # delete
+ case OO_Delete: Out << "?3"; break;
+ // <operator-name> ::= ?4 # =
+ case OO_Equal: Out << "?4"; break;
+ // <operator-name> ::= ?5 # >>
+ case OO_GreaterGreater: Out << "?5"; break;
+ // <operator-name> ::= ?6 # <<
+ case OO_LessLess: Out << "?6"; break;
+ // <operator-name> ::= ?7 # !
+ case OO_Exclaim: Out << "?7"; break;
+ // <operator-name> ::= ?8 # ==
+ case OO_EqualEqual: Out << "?8"; break;
+ // <operator-name> ::= ?9 # !=
+ case OO_ExclaimEqual: Out << "?9"; break;
+ // <operator-name> ::= ?A # []
+ case OO_Subscript: Out << "?A"; break;
+ // ?B # conversion
+ // <operator-name> ::= ?C # ->
+ case OO_Arrow: Out << "?C"; break;
+ // <operator-name> ::= ?D # *
+ case OO_Star: Out << "?D"; break;
+ // <operator-name> ::= ?E # ++
+ case OO_PlusPlus: Out << "?E"; break;
+ // <operator-name> ::= ?F # --
+ case OO_MinusMinus: Out << "?F"; break;
+ // <operator-name> ::= ?G # -
+ case OO_Minus: Out << "?G"; break;
+ // <operator-name> ::= ?H # +
+ case OO_Plus: Out << "?H"; break;
+ // <operator-name> ::= ?I # &
+ case OO_Amp: Out << "?I"; break;
+ // <operator-name> ::= ?J # ->*
+ case OO_ArrowStar: Out << "?J"; break;
+ // <operator-name> ::= ?K # /
+ case OO_Slash: Out << "?K"; break;
+ // <operator-name> ::= ?L # %
+ case OO_Percent: Out << "?L"; break;
+ // <operator-name> ::= ?M # <
+ case OO_Less: Out << "?M"; break;
+ // <operator-name> ::= ?N # <=
+ case OO_LessEqual: Out << "?N"; break;
+ // <operator-name> ::= ?O # >
+ case OO_Greater: Out << "?O"; break;
+ // <operator-name> ::= ?P # >=
+ case OO_GreaterEqual: Out << "?P"; break;
+ // <operator-name> ::= ?Q # ,
+ case OO_Comma: Out << "?Q"; break;
+ // <operator-name> ::= ?R # ()
+ case OO_Call: Out << "?R"; break;
+ // <operator-name> ::= ?S # ~
+ case OO_Tilde: Out << "?S"; break;
+ // <operator-name> ::= ?T # ^
+ case OO_Caret: Out << "?T"; break;
+ // <operator-name> ::= ?U # |
+ case OO_Pipe: Out << "?U"; break;
+ // <operator-name> ::= ?V # &&
+ case OO_AmpAmp: Out << "?V"; break;
+ // <operator-name> ::= ?W # ||
+ case OO_PipePipe: Out << "?W"; break;
+ // <operator-name> ::= ?X # *=
+ case OO_StarEqual: Out << "?X"; break;
+ // <operator-name> ::= ?Y # +=
+ case OO_PlusEqual: Out << "?Y"; break;
+ // <operator-name> ::= ?Z # -=
+ case OO_MinusEqual: Out << "?Z"; break;
+ // <operator-name> ::= ?_0 # /=
+ case OO_SlashEqual: Out << "?_0"; break;
+ // <operator-name> ::= ?_1 # %=
+ case OO_PercentEqual: Out << "?_1"; break;
+ // <operator-name> ::= ?_2 # >>=
+ case OO_GreaterGreaterEqual: Out << "?_2"; break;
+ // <operator-name> ::= ?_3 # <<=
+ case OO_LessLessEqual: Out << "?_3"; break;
+ // <operator-name> ::= ?_4 # &=
+ case OO_AmpEqual: Out << "?_4"; break;
+ // <operator-name> ::= ?_5 # |=
+ case OO_PipeEqual: Out << "?_5"; break;
+ // <operator-name> ::= ?_6 # ^=
+ case OO_CaretEqual: Out << "?_6"; break;
+ // ?_7 # vftable
+ // ?_8 # vbtable
+ // ?_9 # vcall
+ // ?_A # typeof
+ // ?_B # local static guard
+ // ?_C # string
+ // ?_D # vbase destructor
+ // ?_E # vector deleting destructor
+ // ?_F # default constructor closure
+ // ?_G # scalar deleting destructor
+ // ?_H # vector constructor iterator
+ // ?_I # vector destructor iterator
+ // ?_J # vector vbase constructor iterator
+ // ?_K # virtual displacement map
+ // ?_L # eh vector constructor iterator
+ // ?_M # eh vector destructor iterator
+ // ?_N # eh vector vbase constructor iterator
+ // ?_O # copy constructor closure
+ // ?_P<name> # udt returning <name>
+ // ?_Q # <unknown>
+ // ?_R0 # RTTI Type Descriptor
+ // ?_R1 # RTTI Base Class Descriptor at (a,b,c,d)
+ // ?_R2 # RTTI Base Class Array
+ // ?_R3 # RTTI Class Hierarchy Descriptor
+ // ?_R4 # RTTI Complete Object Locator
+ // ?_S # local vftable
+ // ?_T # local vftable constructor closure
+ // <operator-name> ::= ?_U # new[]
+ case OO_Array_New: Out << "?_U"; break;
+ // <operator-name> ::= ?_V # delete[]
+ case OO_Array_Delete: Out << "?_V"; break;
+
+ case OO_Conditional:
+ assert(false && "Don't know how to mangle ?:");
+ break;
+
+ case OO_None:
+ case NUM_OVERLOADED_OPERATORS:
+ assert(false && "Not an overloaded operator");
+ break;
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
+ // <source name> ::= <identifier> @
+ Out << II->getName() << '@';
+}
+
+void MicrosoftCXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
+ llvm::SmallString<64> Buffer;
+ MiscNameMangler(Context, Buffer).mangleObjCMethodName(MD);
+ Out << Buffer;
+}
+
+void MicrosoftCXXNameMangler::mangleQualifiers(Qualifiers Quals,
+ bool IsMember) {
+ // <cvr-qualifiers> ::= [E] [F] [I] <base-cvr-qualifiers>
+ // 'E' means __ptr64 (32-bit only); 'F' means __unaligned (32/64-bit only);
+ // 'I' means __restrict (32/64-bit).
+ // Note that the MSVC __restrict keyword isn't the same as the C99 restrict
+ // keyword!
+ // <base-cvr-qualifiers> ::= A # near
+ // ::= B # near const
+ // ::= C # near volatile
+ // ::= D # near const volatile
+ // ::= E # far (16-bit)
+ // ::= F # far const (16-bit)
+ // ::= G # far volatile (16-bit)
+ // ::= H # far const volatile (16-bit)
+ // ::= I # huge (16-bit)
+ // ::= J # huge const (16-bit)
+ // ::= K # huge volatile (16-bit)
+ // ::= L # huge const volatile (16-bit)
+ // ::= M <basis> # based
+ // ::= N <basis> # based const
+ // ::= O <basis> # based volatile
+ // ::= P <basis> # based const volatile
+ // ::= Q # near member
+ // ::= R # near const member
+ // ::= S # near volatile member
+ // ::= T # near const volatile member
+ // ::= U # far member (16-bit)
+ // ::= V # far const member (16-bit)
+ // ::= W # far volatile member (16-bit)
+ // ::= X # far const volatile member (16-bit)
+ // ::= Y # huge member (16-bit)
+ // ::= Z # huge const member (16-bit)
+ // ::= 0 # huge volatile member (16-bit)
+ // ::= 1 # huge const volatile member (16-bit)
+ // ::= 2 <basis> # based member
+ // ::= 3 <basis> # based const member
+ // ::= 4 <basis> # based volatile member
+ // ::= 5 <basis> # based const volatile member
+ // ::= 6 # near function (pointers only)
+ // ::= 7 # far function (pointers only)
+ // ::= 8 # near method (pointers only)
+ // ::= 9 # far method (pointers only)
+ // ::= _A <basis> # based function (pointers only)
+ // ::= _B <basis> # based function (far?) (pointers only)
+ // ::= _C <basis> # based method (pointers only)
+ // ::= _D <basis> # based method (far?) (pointers only)
+ // ::= _E # block (Clang)
+ // <basis> ::= 0 # __based(void)
+ // ::= 1 # __based(segment)?
+ // ::= 2 <name> # __based(name)
+ // ::= 3 # ?
+ // ::= 4 # ?
+ // ::= 5 # not really based
+ if (!IsMember) {
+ if (!Quals.hasVolatile()) {
+ if (!Quals.hasConst())
+ Out << 'A';
+ else
+ Out << 'B';
+ } else {
+ if (!Quals.hasConst())
+ Out << 'C';
+ else
+ Out << 'D';
+ }
+ } else {
+ if (!Quals.hasVolatile()) {
+ if (!Quals.hasConst())
+ Out << 'Q';
+ else
+ Out << 'R';
+ } else {
+ if (!Quals.hasConst())
+ Out << 'S';
+ else
+ Out << 'T';
+ }
+ }
+
+ // FIXME: For now, just drop all extension qualifiers on the floor.
+}
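For example, a pointee's const becomes 'B' from the non-member column of the
table above (assuming standard MSVC decoration):

    const char *s;          // ?s@@3PBDA   P ptr, B const, D char, A unqualified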
+
+void MicrosoftCXXNameMangler::mangleType(QualType T) {
+ // Only operate on the canonical type!
+ T = getASTContext().getCanonicalType(T);
+
+ Qualifiers Quals = T.getLocalQualifiers();
+ if (Quals) {
+ // We have to mangle these now, while we still have enough information.
+ // <pointer-cvr-qualifiers> ::= P # pointer
+ // ::= Q # const pointer
+ // ::= R # volatile pointer
+ // ::= S # const volatile pointer
+ if (T->isAnyPointerType() || T->isMemberPointerType() ||
+ T->isBlockPointerType()) {
+ if (!Quals.hasVolatile())
+ Out << 'Q';
+ else {
+ if (!Quals.hasConst())
+ Out << 'R';
+ else
+ Out << 'S';
+ }
+ } else
+ // Just emit qualifiers like normal.
+ // NB: When we mangle a pointer/reference type, and the pointee
+ // type has no qualifiers, the lack of qualifier gets mangled
+ // in there.
+ mangleQualifiers(Quals, false);
+ } else if (T->isAnyPointerType() || T->isMemberPointerType() ||
+ T->isBlockPointerType()) {
+ Out << 'P';
+ }
+ switch (T->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT) \
+case Type::CLASS: \
+llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \
+return;
+#define TYPE(CLASS, PARENT) \
+case Type::CLASS: \
+mangleType(static_cast<const CLASS##Type*>(T.getTypePtr())); \
+break;
+#include "clang/AST/TypeNodes.def"
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T) {
+ // <type> ::= <builtin-type>
+ // <builtin-type> ::= X # void
+ // ::= C # signed char
+ // ::= D # char
+ // ::= E # unsigned char
+ // ::= F # short
+ // ::= G # unsigned short (or wchar_t if it's not a builtin)
+ // ::= H # int
+ // ::= I # unsigned int
+ // ::= J # long
+ // ::= K # unsigned long
+ // L # <none>
+ // ::= M # float
+ // ::= N # double
+ // ::= O # long double (__float80 is mangled differently)
+ // ::= _D # __int8 (yup, it's a distinct type in MSVC)
+ // ::= _E # unsigned __int8
+ // ::= _F # __int16
+ // ::= _G # unsigned __int16
+ // ::= _H # __int32
+ // ::= _I # unsigned __int32
+ // ::= _J # long long, __int64
+ // ::= _K # unsigned long long, __int64
+ // ::= _L # __int128
+ // ::= _M # unsigned __int128
+ // ::= _N # bool
+ // _O # <array in parameter>
+ // ::= _T # __float80 (Intel)
+ // ::= _W # wchar_t
+ // ::= _Z # __float80 (Digital Mars)
+ switch (T->getKind()) {
+ case BuiltinType::Void: Out << 'X'; break;
+ case BuiltinType::SChar: Out << 'C'; break;
+ case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'D'; break;
+ case BuiltinType::UChar: Out << 'E'; break;
+ case BuiltinType::Short: Out << 'F'; break;
+ case BuiltinType::UShort: Out << 'G'; break;
+ case BuiltinType::Int: Out << 'H'; break;
+ case BuiltinType::UInt: Out << 'I'; break;
+ case BuiltinType::Long: Out << 'J'; break;
+ case BuiltinType::ULong: Out << 'K'; break;
+ case BuiltinType::Float: Out << 'M'; break;
+ case BuiltinType::Double: Out << 'N'; break;
+ // TODO: Determine size and mangle accordingly
+ case BuiltinType::LongDouble: Out << 'O'; break;
+ // TODO: __int8 and friends
+ case BuiltinType::LongLong: Out << "_J"; break;
+ case BuiltinType::ULongLong: Out << "_K"; break;
+ case BuiltinType::Int128: Out << "_L"; break;
+ case BuiltinType::UInt128: Out << "_M"; break;
+ case BuiltinType::Bool: Out << "_N"; break;
+ case BuiltinType::WChar: Out << "_W"; break;
+
+ case BuiltinType::Overload:
+ case BuiltinType::Dependent:
+ assert(false &&
+ "Overloaded and dependent types shouldn't get to name mangling");
+ break;
+ case BuiltinType::UndeducedAuto:
+ assert(0 && "Should not see undeduced auto here");
+ break;
+ case BuiltinType::ObjCId: Out << "PAUobjc_object@@"; break;
+ case BuiltinType::ObjCClass: Out << "PAUobjc_class@@"; break;
+ case BuiltinType::ObjCSel: Out << "PAUobjc_selector@@"; break;
+
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::NullPtr:
+ assert(false && "Don't know how to mangle this type");
+ break;
+ }
+}
+
+// <type> ::= <function-type>
+void MicrosoftCXXNameMangler::mangleType(const FunctionProtoType *T) {
+ // Structors only appear in decls, so at this point we know it's not a
+ // structor type.
+ // I'll probably have mangleType(MemberPointerType) call the mangleType()
+ // method directly.
+ mangleType(T, NULL, false, false);
+}
+void MicrosoftCXXNameMangler::mangleType(const FunctionNoProtoType *T) {
+ llvm_unreachable("Can't mangle K&R function prototypes");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const FunctionType *T,
+ const FunctionDecl *D,
+ bool IsStructor,
+ bool IsInstMethod) {
+ // <function-type> ::= <this-cvr-qualifiers> <calling-convention>
+ // <return-type> <argument-list> <throw-spec>
+ const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
+
+ // If this is a C++ instance method, mangle the CVR qualifiers for the
+ // this pointer.
+ if (IsInstMethod)
+ mangleQualifiers(Qualifiers::fromCVRMask(Proto->getTypeQuals()), false);
+
+ mangleCallingConvention(T);
+
+ // <return-type> ::= <type>
+ // ::= @ # structors (they have no declared return type)
+ if (IsStructor)
+ Out << '@';
+ else
+ mangleType(Proto->getResultType());
+
+ // <argument-list> ::= X # void
+ // ::= <type>+ @
+ // ::= <type>* Z # varargs
+ if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) {
+ Out << 'X';
+ } else {
+ if (D) {
+ // If we got a decl, use the "types-as-written" to make sure arrays
+ // get mangled right.
+ for (FunctionDecl::param_const_iterator Parm = D->param_begin(),
+ ParmEnd = D->param_end();
+ Parm != ParmEnd; ++Parm)
+ mangleType((*Parm)->getTypeSourceInfo()->getType());
+ } else {
+ for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
+ ArgEnd = Proto->arg_type_end();
+ Arg != ArgEnd; ++Arg)
+ mangleType(*Arg);
+ }
+ // <builtin-type> ::= Z # ellipsis
+ if (Proto->isVariadic())
+ Out << 'Z';
+ else
+ Out << '@';
+ }
+
+ mangleThrowSpecification(Proto);
+}
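Putting the function-type pieces together, assuming standard MSVC decoration
(not verified against this implementation):

    int f(int, double);     // ?f@@YAHHN@Z
    // Y = global, A = __cdecl, H = int return, "HN@" = arg list, Z = throw(...)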
+
+void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) {
+ // <function-class> ::= A # private: near
+ // ::= B # private: far
+ // ::= C # private: static near
+ // ::= D # private: static far
+ // ::= E # private: virtual near
+ // ::= F # private: virtual far
+ // ::= G # private: thunk near
+ // ::= H # private: thunk far
+ // ::= I # protected: near
+ // ::= J # protected: far
+ // ::= K # protected: static near
+ // ::= L # protected: static far
+ // ::= M # protected: virtual near
+ // ::= N # protected: virtual far
+ // ::= O # protected: thunk near
+ // ::= P # protected: thunk far
+ // ::= Q # public: near
+ // ::= R # public: far
+ // ::= S # public: static near
+ // ::= T # public: static far
+ // ::= U # public: virtual near
+ // ::= V # public: virtual far
+ // ::= W # public: thunk near
+ // ::= X # public: thunk far
+ // ::= Y # global near
+ // ::= Z # global far
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ switch (MD->getAccess()) {
+ default:
+ case AS_private:
+ if (MD->isStatic())
+ Out << 'C';
+ else if (MD->isVirtual())
+ Out << 'E';
+ else
+ Out << 'A';
+ break;
+ case AS_protected:
+ if (MD->isStatic())
+ Out << 'K';
+ else if (MD->isVirtual())
+ Out << 'M';
+ else
+ Out << 'I';
+ break;
+ case AS_public:
+ if (MD->isStatic())
+ Out << 'S';
+ else if (MD->isVirtual())
+ Out << 'U';
+ else
+ Out << 'Q';
+ }
+ } else
+ Out << 'Y';
+}
+void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T) {
+ // <calling-convention> ::= A # __cdecl
+ // ::= B # __export __cdecl
+ // ::= C # __pascal
+ // ::= D # __export __pascal
+ // ::= E # __thiscall
+ // ::= F # __export __thiscall
+ // ::= G # __stdcall
+ // ::= H # __export __stdcall
+ // ::= I # __fastcall
+ // ::= J # __export __fastcall
+ // The 'export' calling conventions are from a bygone era
+ // (*cough*Win16*cough*) when functions were declared for export with
+ // that keyword. (It didn't actually export them, it just made them so
+ // that they could be in a DLL and somebody from another module could call
+ // them.)
+ switch (T->getCallConv()) {
+ case CC_Default:
+ case CC_C: Out << 'A'; break;
+ case CC_X86ThisCall: Out << 'E'; break;
+ case CC_X86StdCall: Out << 'G'; break;
+ case CC_X86FastCall: Out << 'I'; break;
+ }
+}
+void MicrosoftCXXNameMangler::mangleThrowSpecification(
+ const FunctionProtoType *FT) {
+ // <throw-spec> ::= Z # throw(...) (default)
+ // ::= @ # throw() or __declspec/__attribute__((nothrow))
+ // ::= <type>+
+ // NOTE: Since the Microsoft compiler ignores throw specifications, they are
+ // all actually mangled as 'Z'. (They're ignored because their associated
+ // functionality isn't implemented, and probably never will be.)
+ Out << 'Z';
+}
+
+void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T) {
+ assert(false && "Don't know how to mangle UnresolvedUsingTypes yet!");
+}
+
+// <type> ::= <union-type> | <struct-type> | <class-type> | <enum-type>
+// <union-type> ::= T <name>
+// <struct-type> ::= U <name>
+// <class-type> ::= V <name>
+// <enum-type> ::= W <size> <name>
+void MicrosoftCXXNameMangler::mangleType(const EnumType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void MicrosoftCXXNameMangler::mangleType(const RecordType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void MicrosoftCXXNameMangler::mangleType(const TagType *T) {
+ switch (T->getDecl()->getTagKind()) {
+ case TTK_Union:
+ Out << 'T';
+ break;
+ case TTK_Struct:
+ Out << 'U';
+ break;
+ case TTK_Class:
+ Out << 'V';
+ break;
+ case TTK_Enum:
+ Out << 'W';
+ Out << getASTContext().getTypeSizeInChars(
+ cast<EnumDecl>(T->getDecl())->getIntegerType()).getQuantity();
+ break;
+ }
+ mangleName(T->getDecl());
+}
+
+// <type> ::= <array-type>
+// <array-type> ::= P <cvr-qualifiers> [Y <dimension-count> <dimension>+]
+// <element-type> # as global
+// ::= Q <cvr-qualifiers> [Y <dimension-count> <dimension>+]
+// <element-type> # as param
+// It's supposed to be the other way around, but for some strange reason, it
+// isn't. Today this behavior is retained for the sole purpose of backwards
+// compatibility.
+void MicrosoftCXXNameMangler::mangleType(const ArrayType *T, bool IsGlobal) {
+ // This isn't a recursive mangling, so now we have to do it all in this
+ // one call.
+ if (IsGlobal)
+ Out << 'P';
+ else
+ Out << 'Q';
+ mangleExtraDimensions(T->getElementType());
+}
+void MicrosoftCXXNameMangler::mangleType(const ConstantArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleType(const VariableArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleType(const DependentSizedArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleType(const IncompleteArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleExtraDimensions(QualType ElementTy) {
+ llvm::SmallVector<llvm::APInt, 3> Dimensions;
+ for (;;) {
+ if (ElementTy->isConstantArrayType()) {
+ const ConstantArrayType *CAT =
+ static_cast<const ConstantArrayType *>(ElementTy.getTypePtr());
+ Dimensions.push_back(CAT->getSize());
+ ElementTy = CAT->getElementType();
+ } else if (ElementTy->isVariableArrayType()) {
+ assert(false && "Don't know how to mangle VLAs!");
+ } else if (ElementTy->isDependentSizedArrayType()) {
+ // The dependent expression has to be folded into a constant (TODO).
+ assert(false && "Don't know how to mangle dependent-sized arrays!");
+    } else if (const IncompleteArrayType *IAT =
+                   getASTContext().getAsIncompleteArrayType(ElementTy)) {
+      // Incomplete arrays contribute no dimension; descend to the element
+      // type so the loop terminates.
+      ElementTy = IAT->getElementType();
+    } else
+      break;
+ }
+ mangleQualifiers(ElementTy.getQualifiers(), false);
+ // If there are any additional dimensions, mangle them now.
+ if (Dimensions.size() > 0) {
+ Out << 'Y';
+ // <dimension-count> ::= <number> # number of extra dimensions
+ mangleNumber(Dimensions.size());
+ for (unsigned Dim = 0; Dim < Dimensions.size(); ++Dim) {
+ mangleNumber(Dimensions[Dim].getLimitedValue());
+ }
+ }
+ mangleType(ElementTy.getLocalUnqualifiedType());
+}
+
+// <type> ::= <pointer-to-member-type>
+// <pointer-to-member-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers>
+// <class name> <type>
+void MicrosoftCXXNameMangler::mangleType(const MemberPointerType *T) {
+ QualType PointeeType = T->getPointeeType();
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
+ Out << '8';
+ mangleName(cast<RecordType>(T->getClass())->getDecl());
+ mangleType(FPT, NULL, false, true);
+ } else {
+ mangleQualifiers(PointeeType.getQualifiers(), true);
+ mangleName(cast<RecordType>(T->getClass())->getDecl());
+ mangleType(PointeeType.getLocalUnqualifiedType());
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TemplateTypeParmType *T) {
+ assert(false && "Don't know how to mangle TemplateTypeParmTypes yet!");
+}
+
+// <type> ::= <pointer-type>
+// <pointer-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers> <type>
+void MicrosoftCXXNameMangler::mangleType(const PointerType *T) {
+ QualType PointeeTy = T->getPointeeType();
+ if (PointeeTy->isArrayType()) {
+ // Pointers to arrays are mangled like arrays.
+ mangleExtraDimensions(T->getPointeeType());
+ } else if (PointeeTy->isFunctionType()) {
+ // Function pointers are special.
+ Out << '6';
+ mangleType(static_cast<const FunctionType *>(PointeeTy.getTypePtr()),
+ NULL, false, false);
+ } else {
+ if (!PointeeTy.hasQualifiers())
+ // Lack of qualifiers is mangled as 'A'.
+ Out << 'A';
+ mangleType(PointeeTy);
+ }
+}
+void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T) {
+ // Object pointers never have qualifiers.
+ Out << 'A';
+ mangleType(T->getPointeeType());
+}
+
+// <type> ::= <reference-type>
+// <reference-type> ::= A <cvr-qualifiers> <type>
+void MicrosoftCXXNameMangler::mangleType(const LValueReferenceType *T) {
+ Out << 'A';
+ QualType PointeeTy = T->getPointeeType();
+ if (!PointeeTy.hasQualifiers())
+ // Lack of qualifiers is mangled as 'A'.
+ Out << 'A';
+ mangleType(PointeeTy);
+}
+
+void MicrosoftCXXNameMangler::mangleType(const RValueReferenceType *T) {
+ assert(false && "Don't know how to mangle RValueReferenceTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ComplexType *T) {
+ assert(false && "Don't know how to mangle ComplexTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const VectorType *T) {
+ assert(false && "Don't know how to mangle VectorTypes yet!");
+}
+void MicrosoftCXXNameMangler::mangleType(const ExtVectorType *T) {
+ assert(false && "Don't know how to mangle ExtVectorTypes yet!");
+}
+void MicrosoftCXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
+ assert(false && "Don't know how to mangle DependentSizedExtVectorTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T) {
+ // ObjC interfaces have structs underlying them.
+ Out << 'U';
+ mangleName(T->getDecl());
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T) {
+ // We don't allow overloading by different protocol qualification,
+ // so mangling them isn't necessary.
+ mangleType(T->getBaseType());
+}
+
+void MicrosoftCXXNameMangler::mangleType(const BlockPointerType *T) {
+ Out << "_E";
+ mangleType(T->getPointeeType());
+}
+
+void MicrosoftCXXNameMangler::mangleType(const InjectedClassNameType *T) {
+ assert(false && "Don't know how to mangle InjectedClassNameTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TemplateSpecializationType *T) {
+ assert(false && "Don't know how to mangle TemplateSpecializationTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const DependentNameType *T) {
+ assert(false && "Don't know how to mangle DependentNameTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(
+ const DependentTemplateSpecializationType *T) {
+ assert(false &&
+ "Don't know how to mangle DependentTemplateSpecializationTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TypeOfType *T) {
+ assert(false && "Don't know how to mangle TypeOfTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TypeOfExprType *T) {
+ assert(false && "Don't know how to mangle TypeOfExprTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const DecltypeType *T) {
+ assert(false && "Don't know how to mangle DecltypeTypes yet!");
+}
+
+void MicrosoftMangleContext::mangleName(const NamedDecl *D,
+ llvm::SmallVectorImpl<char> &Name) {
+ assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
+ "Invalid mangleName() call, argument is not a variable or function!");
+ assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
+ "Invalid mangleName() call on 'structor decl!");
+
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ getASTContext().getSourceManager(),
+ "Mangling declaration");
+
+ MicrosoftCXXNameMangler Mangler(*this, Name);
+ return Mangler.mangle(D);
+}
+void MicrosoftMangleContext::mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle thunks!");
+}
+void MicrosoftMangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const ThisAdjustment &,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle destructor thunks!");
+}
+void MicrosoftMangleContext::mangleGuardVariable(const VarDecl *D,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle guard variables!");
+}
+void MicrosoftMangleContext::mangleCXXVTable(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle virtual tables!");
+}
+void MicrosoftMangleContext::mangleCXXVTT(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &) {
+ llvm_unreachable("The MS C++ ABI does not have virtual table tables!");
+}
+void MicrosoftMangleContext::mangleCXXCtorVTable(const CXXRecordDecl *RD,
+ int64_t Offset,
+ const CXXRecordDecl *Type,
+ llvm::SmallVectorImpl<char> &) {
+ llvm_unreachable("The MS C++ ABI does not have constructor vtables!");
+}
+void MicrosoftMangleContext::mangleCXXRTTI(QualType T,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle RTTI!");
+}
+void MicrosoftMangleContext::mangleCXXRTTIName(QualType T,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle RTTI names!");
+}
+void MicrosoftMangleContext::mangleCXXCtor(const CXXConstructorDecl *D,
+ CXXCtorType Type,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle constructors!");
+}
+void MicrosoftMangleContext::mangleCXXDtor(const CXXDestructorDecl *D,
+ CXXDtorType Type,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle destructors!");
+}
+
+CXXABI *clang::CodeGen::CreateMicrosoftCXXABI(CodeGenModule &CGM) {
+ return new MicrosoftCXXABI(CGM);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
index 9905ca6..6d9d277 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
@@ -13,7 +13,7 @@
#include "clang/CodeGen/ModuleBuilder.h"
#include "CodeGenModule.h"
-#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
index b29d3cb..c65f203 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
@@ -17,6 +17,7 @@
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/Type.h"
+#include "llvm/Target/TargetData.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
@@ -280,7 +281,9 @@ class DefaultABIInfo : public ABIInfo {
llvm::LLVMContext &VMContext) const;
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
+ llvm::LLVMContext &VMContext,
+ const llvm::Type *const *PrefTypes,
+ unsigned NumPrefTypes) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
VMContext);
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
@@ -316,6 +319,10 @@ ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
+//===----------------------------------------------------------------------===//
+// X86-32 ABI Implementation
+//===----------------------------------------------------------------------===//
+
/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
ASTContext &Context;
@@ -343,7 +350,9 @@ public:
llvm::LLVMContext &VMContext) const;
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
+ llvm::LLVMContext &VMContext,
+ const llvm::Type *const *PrefTypes,
+ unsigned NumPrefTypes) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
VMContext);
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
@@ -599,8 +608,7 @@ llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
uint64_t Offset =
llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
llvm::Value *NextAddr =
- Builder.CreateGEP(Addr, llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
"ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
@@ -657,9 +665,17 @@ bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
return false;
}
+//===----------------------------------------------------------------------===//
+// X86-64 ABI Implementation
+//===----------------------------------------------------------------------===//
+
namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
+ ASTContext &Context;
+ const llvm::TargetData &TD;
+
enum Class {
Integer = 0,
SSE,
@@ -680,7 +696,7 @@ class X86_64ABIInfo : public ABIInfo {
/// always be either NoClass or the result of a previous merge
/// call. In addition, this should never be Memory (the caller
/// should just return Memory for the aggregate).
- Class merge(Class Accum, Class Field) const;
+ static Class merge(Class Accum, Class Field);
/// classify - Determine the x86_64 register classes in which the
/// given type T should be passed.
@@ -703,8 +719,7 @@ class X86_64ABIInfo : public ABIInfo {
///
/// If the \arg Lo class is ComplexX87, then the \arg Hi class will
/// also be ComplexX87.
- void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
- Class &Lo, Class &Hi) const;
+ void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;
/// getCoerceResult - Given a source type \arg Ty and an LLVM type
/// to coerce to, chose the best way to pass Ty in the same place
@@ -716,30 +731,33 @@ class X86_64ABIInfo : public ABIInfo {
/// type. This makes this code more explicit, and it makes it clearer that we
/// are also doing this for correctness in the case of passing scalar types.
ABIArgInfo getCoerceResult(QualType Ty,
- const llvm::Type *CoerceTo,
- ASTContext &Context) const;
+ const llvm::Type *CoerceTo) const;
  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be returned in memory.
- ABIArgInfo getIndirectReturnResult(QualType Ty, ASTContext &Context) const;
+ ABIArgInfo getIndirectReturnResult(QualType Ty) const;
  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
/// such that the argument will be passed in memory.
- ABIArgInfo getIndirectResult(QualType Ty, ASTContext &Context) const;
+ ABIArgInfo getIndirectResult(QualType Ty) const;
ABIArgInfo classifyReturnType(QualType RetTy,
- ASTContext &Context,
llvm::LLVMContext &VMContext) const;
ABIArgInfo classifyArgumentType(QualType Ty,
- ASTContext &Context,
llvm::LLVMContext &VMContext,
unsigned &neededInt,
- unsigned &neededSSE) const;
+ unsigned &neededSSE,
+ const llvm::Type *PrefType) const;
public:
+ X86_64ABIInfo(ASTContext &Ctx, const llvm::TargetData &td)
+ : Context(Ctx), TD(td) {}
+
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
+ llvm::LLVMContext &VMContext,
+ const llvm::Type *const *PrefTypes,
+ unsigned NumPrefTypes) const;
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
@@ -747,7 +765,8 @@ public:
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
- X86_64TargetCodeGenInfo():TargetCodeGenInfo(new X86_64ABIInfo()) {}
+ X86_64TargetCodeGenInfo(ASTContext &Ctx, const llvm::TargetData &TD)
+ : TargetCodeGenInfo(new X86_64ABIInfo(Ctx, TD)) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
return 7;
@@ -771,8 +790,7 @@ public:
}
-X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
- Class Field) const {
+X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
// AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
// classified recursively so that always two fields are
// considered. The resulting class is calculated according to
@@ -800,22 +818,19 @@ X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
"Invalid accumulated classification during merge.");
if (Accum == Field || Field == NoClass)
return Accum;
- else if (Field == Memory)
+ if (Field == Memory)
return Memory;
- else if (Accum == NoClass)
+ if (Accum == NoClass)
return Field;
- else if (Accum == Integer || Field == Integer)
+ if (Accum == Integer || Field == Integer)
return Integer;
- else if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
- Accum == X87 || Accum == X87Up)
+ if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
+ Accum == X87 || Accum == X87Up)
return Memory;
- else
- return SSE;
+ return SSE;
}
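A worked example of these merge rules, assuming the usual AMD64 classification
of scalar fields:

    struct S { int a; float b; };  // one eightbyte: 'a' -> Integer, 'b' -> SSE

merge(Integer, SSE) yields Integer, so S travels in a single general-purpose
register.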
-void X86_64ABIInfo::classify(QualType Ty,
- ASTContext &Context,
- uint64_t OffsetBase,
+void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
Class &Lo, Class &Hi) const {
// FIXME: This code can be simplified by introducing a simple value class for
// Class pairs with appropriate constructor methods for the various
@@ -848,17 +863,29 @@ void X86_64ABIInfo::classify(QualType Ty,
}
// FIXME: _Decimal32 and _Decimal64 are SSE.
// FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
- } else if (const EnumType *ET = Ty->getAs<EnumType>()) {
+ return;
+ }
+
+ if (const EnumType *ET = Ty->getAs<EnumType>()) {
// Classify the underlying integer type.
- classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
- } else if (Ty->hasPointerRepresentation()) {
+ classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
+ return;
+ }
+
+ if (Ty->hasPointerRepresentation()) {
Current = Integer;
- } else if (Ty->isMemberPointerType()) {
+ return;
+ }
+
+ if (Ty->isMemberPointerType()) {
if (Ty->isMemberFunctionPointerType())
Lo = Hi = Integer;
else
Current = Integer;
- } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ return;
+ }
+
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
uint64_t Size = Context.getTypeSize(VT);
if (Size == 32) {
// gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
@@ -890,11 +917,14 @@ void X86_64ABIInfo::classify(QualType Ty,
Lo = SSE;
Hi = SSEUp;
}
- } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ return;
+ }
+
+ if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
QualType ET = Context.getCanonicalType(CT->getElementType());
uint64_t Size = Context.getTypeSize(Ty);
- if (ET->isIntegralType()) {
+ if (ET->isIntegralOrEnumerationType()) {
if (Size <= 64)
Current = Integer;
else if (Size <= 128)
@@ -912,7 +942,11 @@ void X86_64ABIInfo::classify(QualType Ty,
uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
if (Hi == NoClass && EB_Real != EB_Imag)
Hi = Lo;
- } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+
+ return;
+ }
+
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
// Arrays are treated like structures.
uint64_t Size = Context.getTypeSize(Ty);
@@ -936,7 +970,7 @@ void X86_64ABIInfo::classify(QualType Ty,
uint64_t ArraySize = AT->getSize().getZExtValue();
for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
Class FieldLo, FieldHi;
- classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
+ classify(AT->getElementType(), Offset, FieldLo, FieldHi);
Lo = merge(Lo, FieldLo);
Hi = merge(Hi, FieldHi);
if (Lo == Memory || Hi == Memory)
@@ -947,7 +981,10 @@ void X86_64ABIInfo::classify(QualType Ty,
if (Hi == Memory)
Lo = Memory;
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
- } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ return;
+ }
+
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
uint64_t Size = Context.getTypeSize(Ty);
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
@@ -988,7 +1025,7 @@ void X86_64ABIInfo::classify(QualType Ty,
// initialized to class NO_CLASS.
Class FieldLo, FieldHi;
uint64_t Offset = OffsetBase + Layout.getBaseClassOffset(Base);
- classify(i->getType(), Context, Offset, FieldLo, FieldHi);
+ classify(i->getType(), Offset, FieldLo, FieldHi);
Lo = merge(Lo, FieldLo);
Hi = merge(Hi, FieldHi);
if (Lo == Memory || Hi == Memory)
@@ -1047,7 +1084,7 @@ void X86_64ABIInfo::classify(QualType Ty,
FieldHi = EB_Hi ? Integer : NoClass;
}
} else
- classify(i->getType(), Context, Offset, FieldLo, FieldHi);
+ classify(i->getType(), Offset, FieldLo, FieldHi);
Lo = merge(Lo, FieldLo);
Hi = merge(Hi, FieldHi);
if (Lo == Memory || Hi == Memory)
@@ -1074,9 +1111,8 @@ void X86_64ABIInfo::classify(QualType Ty,
}
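The net effect of classify() is easiest to see on concrete types. The structs below are hand-worked examples against AMD64-ABI 3.2.3, for orientation only; they are not derived by running the code above:

// Hand-worked Lo/Hi classifications per AMD64-ABI 3.2.3 (x86-64 SysV).
struct P   { void *p;         };  // Lo = Integer, Hi = NoClass -> one GPR
struct II  { int a, b;        };  // Lo = Integer, Hi = NoClass -> both ints
                                  //   share the first eightbyte
struct ID  { int a; double d; };  // Lo = Integer, Hi = SSE -> GPR + XMM
struct DD  { double a, b;     };  // Lo = SSE, Hi = SSE -> two XMM registers
struct BIG { char buf[32];    };  // larger than 16 bytes -> Memory, passed
                                  //   indirectly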
ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
- const llvm::Type *CoerceTo,
- ASTContext &Context) const {
- if (CoerceTo == llvm::Type::getInt64Ty(CoerceTo->getContext())) {
+ const llvm::Type *CoerceTo) const {
+ if (CoerceTo->isIntegerTy(64) || isa<llvm::PointerType>(CoerceTo)) {
// Integer and pointer types will end up in a general purpose
// register.
@@ -1084,10 +1120,21 @@ ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- if (Ty->isIntegralType() || Ty->hasPointerRepresentation())
+ if (Ty->isIntegralOrEnumerationType() || Ty->hasPointerRepresentation())
return (Ty->isPromotableIntegerType() ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
- } else if (CoerceTo == llvm::Type::getDoubleTy(CoerceTo->getContext())) {
+
+ // If this is an 8/16/32-bit structure that is passed as an int64, then it
+ // will be passed in the low 8/16/32 bits of a 64-bit GPR, which is the same
+ // as how an i8/i16/i32 is passed. Coerce to an i8/i16/i32 instead of an i64.
+ switch (Context.getTypeSizeInChars(Ty).getQuantity()) {
+ default: break;
+ case 1: CoerceTo = llvm::Type::getInt8Ty(CoerceTo->getContext()); break;
+ case 2: CoerceTo = llvm::Type::getInt16Ty(CoerceTo->getContext()); break;
+ case 4: CoerceTo = llvm::Type::getInt32Ty(CoerceTo->getContext()); break;
+ }
+
+ } else if (CoerceTo->isDoubleTy()) {
assert(Ty.isCanonical() && "should always have a canonical type here");
assert(!Ty.hasQualifiers() && "should never have a qualified type here");
@@ -1095,13 +1142,17 @@ ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
if (Ty == Context.FloatTy || Ty == Context.DoubleTy)
return ABIArgInfo::getDirect();
+ // If this is a 32-bit structure that is passed as a double, then it will be
+ // passed in the low 32-bits of the XMM register, which is the same as how a
+ // float is passed. Coerce to a float instead of a double.
+ if (Context.getTypeSizeInChars(Ty).getQuantity() == 4)
+ CoerceTo = llvm::Type::getFloatTy(CoerceTo->getContext());
}
return ABIArgInfo::getCoerce(CoerceTo);
}
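The coercions chosen above show up directly in the IR signatures. A hand-written sketch of the intended outcomes (illustrative, not verbatim compiler output):

struct S1 { char  c; };  // classified Integer; now coerced to i8 (was i64)
struct S2 { short s; };  // coerced to i16
struct S3 { int   i; };  // coerced to i32
struct S4 { float f; };  // classified SSE; coerced to float rather than
                         //   double: it travels in the low 32 bits of an
                         //   XMM register either way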
-ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty,
- ASTContext &Context) const {
+ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
// If this is a scalar LLVM value then assume LLVM will pass it in the right
// place naturally.
if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
@@ -1116,8 +1167,7 @@ ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty,
return ABIArgInfo::getIndirect(0);
}
-ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
- ASTContext &Context) const {
+ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const {
// If this is a scalar LLVM value then assume LLVM will pass it in the right
// place naturally.
if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
@@ -1141,13 +1191,12 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
return ABIArgInfo::getIndirect(0);
}
-ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
+ABIArgInfo X86_64ABIInfo::
+classifyReturnType(QualType RetTy, llvm::LLVMContext &VMContext) const {
// AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
// classification algorithm.
X86_64ABIInfo::Class Lo, Hi;
- classify(RetTy, Context, 0, Lo, Hi);
+ classify(RetTy, 0, Lo, Hi);
// Check some invariants.
assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
@@ -1166,7 +1215,7 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
// AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
// hidden argument.
case Memory:
- return getIndirectReturnResult(RetTy, Context);
+ return getIndirectReturnResult(RetTy);
// AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
// available register of the sequence %rax, %rdx is used.
@@ -1236,15 +1285,40 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
break;
}
- return getCoerceResult(RetTy, ResType, Context);
+ return getCoerceResult(RetTy, ResType);
}
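Return classification uses the same classes but maps them onto %rax/%rdx and %xmm0/%xmm1. Hand-worked examples (illustrative only, per AMD64-ABI 3.2.3p4):

struct RI { long a, b;     };  // Integer, Integer -> returned in %rax, %rdx
struct RD { double re, im; };  // SSE, SSE -> returned in %xmm0, %xmm1
struct RM { char buf[24];  };  // Memory -> caller passes a hidden sret
                               //   pointer and the value is returned there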
-ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
+static const llvm::Type *Get8ByteTypeAtOffset(const llvm::Type *PrefType,
+ unsigned Offset,
+ const llvm::TargetData &TD) {
+ if (PrefType == 0) return 0;
+
+ // Pointers are always 8 bytes at offset 0.
+ if (Offset == 0 && isa<llvm::PointerType>(PrefType))
+ return PrefType;
+
+ // TODO: 1/2/4/8-byte integers are also interesting, but we have to know that
+ // the "hole" is not used in the containing struct (just undef padding).
+ const llvm::StructType *STy = dyn_cast<llvm::StructType>(PrefType);
+ if (STy == 0) return 0;
+
+ // If this is a struct, recurse into the field at the specified offset.
+ const llvm::StructLayout *SL = TD.getStructLayout(STy);
+ if (Offset >= SL->getSizeInBytes()) return 0;
+
+ unsigned FieldIdx = SL->getElementContainingOffset(Offset);
+ Offset -= SL->getElementOffset(FieldIdx);
+
+ return Get8ByteTypeAtOffset(STy->getElementType(FieldIdx), Offset, TD);
+}
+
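As written, Get8ByteTypeAtOffset only ever reports pointer types, either found directly at offset 0 or reached by recursing through struct fields; plain integers are deferred to the TODO above. A worked trace over hypothetical IR types (comments only):

// Worked trace (hypothetical IR types):
//   Get8ByteTypeAtOffset({ i8*, i64 }, 0)  -> field 0 is i8* at offset 0
//                                          -> returns i8*
//   Get8ByteTypeAtOffset({ i8*, i64 }, 8)  -> field 1 is i64; i64 is neither
//                                             a pointer nor a struct
//                                          -> returns 0 (no preference; the
//                                             caller falls back to i64)
//   Get8ByteTypeAtOffset({ { i8* }, double }, 0)
//                                          -> recurses into { i8* }, then
//                                             hits i8* at offset 0 -> i8*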
+ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty,
llvm::LLVMContext &VMContext,
unsigned &neededInt,
- unsigned &neededSSE) const {
+ unsigned &neededSSE,
+ const llvm::Type *PrefType) const {
X86_64ABIInfo::Class Lo, Hi;
- classify(Ty, Context, 0, Lo, Hi);
+ classify(Ty, 0, Lo, Hi);
// Check some invariants.
// FIXME: Enforce these by construction.
@@ -1267,7 +1341,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
// COMPLEX_X87, it is passed in memory.
case X87:
case ComplexX87:
- return getIndirectResult(Ty, Context);
+ return getIndirectResult(Ty);
case SSEUp:
case X87Up:
@@ -1277,8 +1351,16 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
// available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
// and %r9 is used.
case Integer:
- ++neededInt;
+ // It is always safe to classify this as an i64 argument.
ResType = llvm::Type::getInt64Ty(VMContext);
+ ++neededInt;
+
+ // If we can choose a better 8-byte type based on the preferred type, and if
+ // that type is still passed in a GPR, use it.
+ if (const llvm::Type *PrefTypeLo = Get8ByteTypeAtOffset(PrefType, 0, TD))
+ if (isa<llvm::IntegerType>(PrefTypeLo) ||
+ isa<llvm::PointerType>(PrefTypeLo))
+ ResType = PrefTypeLo;
break;
// AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
@@ -1301,11 +1383,22 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
break;
case NoClass: break;
- case Integer:
- ResType = llvm::StructType::get(VMContext, ResType,
- llvm::Type::getInt64Ty(VMContext), NULL);
+
+ case Integer: {
+ // It is always safe to classify this as an i64 argument.
+ const llvm::Type *HiType = llvm::Type::getInt64Ty(VMContext);
++neededInt;
+
+ // If we can choose a better 8-byte type based on the preferred type, and if
+ // that type is still passed in a GPR, use it.
+ if (const llvm::Type *PrefTypeHi = Get8ByteTypeAtOffset(PrefType, 8, TD))
+ if (isa<llvm::IntegerType>(PrefTypeHi) ||
+ isa<llvm::PointerType>(PrefTypeHi))
+ HiType = PrefTypeHi;
+
+ ResType = llvm::StructType::get(VMContext, ResType, HiType, NULL);
break;
+ }
// X87Up generally doesn't occur here (long double is passed in
// memory), except in situations involving unions.
@@ -1325,13 +1418,14 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
break;
}
- return getCoerceResult(Ty, ResType, Context);
+ return getCoerceResult(Ty, ResType);
}
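The payoff of threading PrefType through: when the client's preferred IR type has a pointer in an eightbyte, the argument keeps its pointer type instead of being laundered through i64. A hand-written before/after sketch, assuming the client passes a preferred type of { i8*, i64 } via computeInfo's PrefTypes; the IR shapes are illustrative:

struct SP { void *p; long n; };   // Lo = Integer, Hi = Integer
void takes_sp(struct SP s);
//   before: coerced to { i64, i64 }  -- the pointer round-trips through
//                                       ptrtoint/inttoptr in the IR
//   after:  coerced to { i8*, i64 }  -- the first eightbyte keeps its
//                                       pointer type, which the optimizer
//                                       handles more cleanly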
void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
- Context, VMContext);
+ llvm::LLVMContext &VMContext,
+ const llvm::Type *const *PrefTypes,
+ unsigned NumPrefTypes) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), VMContext);
// Keep track of the number of assigned registers.
unsigned freeIntRegs = 6, freeSSERegs = 8;
@@ -1345,9 +1439,17 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
// get assigned (in left-to-right order) for passing as follows...
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it) {
+ // If the client specified a preferred IR type to use, pass it down to
+ // classifyArgumentType.
+ const llvm::Type *PrefType = 0;
+ if (NumPrefTypes) {
+ PrefType = *PrefTypes++;
+ --NumPrefTypes;
+ }
+
unsigned neededInt, neededSSE;
- it->info = classifyArgumentType(it->type, Context, VMContext,
- neededInt, neededSSE);
+ it->info = classifyArgumentType(it->type, VMContext,
+ neededInt, neededSSE, PrefType);
// AMD64-ABI 3.2.3p3: If there are no registers available for any
// eightbyte of an argument, the whole argument is passed on the
@@ -1357,7 +1459,7 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
freeIntRegs -= neededInt;
freeSSERegs -= neededSSE;
} else {
- it->info = getIndirectResult(it->type, Context);
+ it->info = getIndirectResult(it->type);
}
}
}
@@ -1380,12 +1482,11 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
// overflow_arg_area = (overflow_arg_area + 15) & ~15;
llvm::Value *Offset =
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), 15);
+ llvm::ConstantInt::get(CGF.Int32Ty, 15);
overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
- llvm::Type::getInt64Ty(CGF.getLLVMContext()));
- llvm::Value *Mask = llvm::ConstantInt::get(
- llvm::Type::getInt64Ty(CGF.getLLVMContext()), ~15LL);
+ CGF.Int64Ty);
+ llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~15LL);
overflow_arg_area =
CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
overflow_arg_area->getType(),
@@ -1405,8 +1506,7 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
llvm::Value *Offset =
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()),
- (SizeInBytes + 7) & ~7);
+ llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
"overflow_arg_area.next");
CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
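Both roundings in this function are the usual align-up idiom. A minimal standalone check (not part of the patch):

#include <cassert>

int main() {
  // overflow_arg_area = (overflow_arg_area + 15) & ~15  -> 16-byte align up
  unsigned long long p = 0x1001;
  assert(((p + 15) & ~15ULL) == 0x1010);

  // argument sizes are rounded up to a multiple of 8 bytes
  unsigned long long SizeInBytes = 12;
  assert(((SizeInBytes + 7) & ~7ULL) == 16);
  return 0;
}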
@@ -1418,8 +1518,6 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
llvm::LLVMContext &VMContext = CGF.getLLVMContext();
- const llvm::Type *i32Ty = llvm::Type::getInt32Ty(VMContext);
- const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
// Assume that va_list type is correct; should be pointer to LLVM type:
// struct {
@@ -1431,8 +1529,7 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
unsigned neededInt, neededSSE;
Ty = CGF.getContext().getCanonicalType(Ty);
- ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(), VMContext,
- neededInt, neededSSE);
+ ABIArgInfo AI = classifyArgumentType(Ty, VMContext, neededInt, neededSSE, 0);
// AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
// in the registers. If not, go to step 7.
@@ -1456,21 +1553,16 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
if (neededInt) {
gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
- InRegs =
- CGF.Builder.CreateICmpULE(gp_offset,
- llvm::ConstantInt::get(i32Ty,
- 48 - neededInt * 8),
- "fits_in_gp");
+ InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
+ InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
}
if (neededSSE) {
fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
llvm::Value *FitsInFP =
- CGF.Builder.CreateICmpULE(fp_offset,
- llvm::ConstantInt::get(i32Ty,
- 176 - neededSSE * 16),
- "fits_in_fp");
+ llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
+ FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
}
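The constants 48 and 176 fall out of the layout of the register save area (AMD64-ABI 3.5.7): six GPRs of 8 bytes each, followed by eight SSE registers saved 16 bytes apart. A standalone check:

#include <cassert>

int main() {
  const unsigned NumGPRs = 6, GPRBytes = 8;   // %rdi %rsi %rdx %rcx %r8 %r9
  const unsigned NumSSE  = 8, SSEBytes = 16;  // %xmm0 .. %xmm7
  assert(NumGPRs * GPRBytes == 48);                       // gp_offset bound
  assert(NumGPRs * GPRBytes + NumSSE * SSEBytes == 176);  // fp_offset bound
  return 0;
}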
@@ -1525,45 +1617,42 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
RegAddr = CGF.Builder.CreateBitCast(RegAddr,
llvm::PointerType::getUnqual(LTy));
+ } else if (neededSSE == 1) {
+ RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ RegAddr = CGF.Builder.CreateBitCast(RegAddr,
+ llvm::PointerType::getUnqual(LTy));
} else {
- if (neededSSE == 1) {
- RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
- RegAddr = CGF.Builder.CreateBitCast(RegAddr,
- llvm::PointerType::getUnqual(LTy));
- } else {
- assert(neededSSE == 2 && "Invalid number of needed registers!");
- // SSE registers are spaced 16 bytes apart in the register save
- // area, we need to collect the two eightbytes together.
- llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
- llvm::Value *RegAddrHi =
- CGF.Builder.CreateGEP(RegAddrLo,
- llvm::ConstantInt::get(i32Ty, 16));
- const llvm::Type *DblPtrTy =
- llvm::PointerType::getUnqual(DoubleTy);
- const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
- DoubleTy, NULL);
- llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
- V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
- DblPtrTy));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
- V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
- DblPtrTy));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
- RegAddr = CGF.Builder.CreateBitCast(Tmp,
- llvm::PointerType::getUnqual(LTy));
- }
+ assert(neededSSE == 2 && "Invalid number of needed registers!");
+ // SSE registers are spaced 16 bytes apart in the register save
+ // area; we need to collect the two eightbytes together.
+ llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
+ const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
+ const llvm::Type *DblPtrTy =
+ llvm::PointerType::getUnqual(DoubleTy);
+ const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
+ DoubleTy, NULL);
+ llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
+ DblPtrTy));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
+ DblPtrTy));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+ RegAddr = CGF.Builder.CreateBitCast(Tmp,
+ llvm::PointerType::getUnqual(LTy));
}
// AMD64-ABI 3.5.7p5: Step 5. Set:
// l->gp_offset = l->gp_offset + num_gp * 8
// l->fp_offset = l->fp_offset + num_fp * 16.
if (neededInt) {
- llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededInt * 8);
+ llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
gp_offset_p);
}
if (neededSSE) {
- llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededSSE * 16);
+ llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
fp_offset_p);
}
@@ -1582,11 +1671,14 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
ResAddr->reserveOperandSpace(2);
ResAddr->addIncoming(RegAddr, InRegBlock);
ResAddr->addIncoming(MemAddr, InMemBlock);
-
return ResAddr;
}
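For reference, the System V x86-64 va_list record this function assumes has the following shape (from the ABI document; the typedef name below is illustrative, compilers spell it as a one-element array of __va_list_tag):

typedef struct {
  unsigned int gp_offset;    // bytes consumed from the GPR part of the
                             //   register save area (0..48)
  unsigned int fp_offset;    // bytes consumed from the SSE part (48..176)
  void *overflow_arg_area;   // next stack-passed argument
  void *reg_save_area;       // registers spilled by the function prologue
} va_list_record;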
+
+
+//===----------------------------------------------------------------------===//
// PIC16 ABI Implementation
+//===----------------------------------------------------------------------===//
namespace {
@@ -1600,7 +1692,9 @@ class PIC16ABIInfo : public ABIInfo {
llvm::LLVMContext &VMContext) const;
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
+ llvm::LLVMContext &VMContext,
+ const llvm::Type *const *PrefTypes,
+ unsigned NumPrefTypes) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
VMContext);
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
@@ -1636,7 +1730,7 @@ ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
}
llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
+ CodeGenFunction &CGF) const {
const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
@@ -1719,7 +1813,9 @@ PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
}
+//===----------------------------------------------------------------------===//
// ARM ABI Implementation
+//===----------------------------------------------------------------------===//
namespace {
@@ -1749,7 +1845,9 @@ private:
llvm::LLVMContext &VMContext) const;
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
+ llvm::LLVMContext &VMContext,
+ const llvm::Type *const *PrefTypes,
+ unsigned NumPrefTypes) const;
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
@@ -1768,7 +1866,9 @@ public:
}
void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
+ llvm::LLVMContext &VMContext,
+ const llvm::Type *const *PrefTypes,
+ unsigned NumPrefTypes) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
VMContext);
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
@@ -1776,14 +1876,23 @@ void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
it->info = classifyArgumentType(it->type, Context, VMContext);
}
- // ARM always overrides the calling convention.
+ const llvm::Triple &Triple(Context.Target.getTriple());
+ llvm::CallingConv::ID DefaultCC;
+ if (Triple.getEnvironmentName() == "gnueabi" ||
+ Triple.getEnvironmentName() == "eabi")
+ DefaultCC = llvm::CallingConv::ARM_AAPCS;
+ else
+ DefaultCC = llvm::CallingConv::ARM_APCS;
+
switch (getABIKind()) {
case APCS:
- FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
+ if (DefaultCC != llvm::CallingConv::ARM_APCS)
+ FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
break;
case AAPCS:
- FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
+ if (DefaultCC != llvm::CallingConv::ARM_AAPCS)
+ FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
break;
case AAPCS_VFP:
@@ -1808,6 +1917,11 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
if (isEmptyRecord(Context, Ty, true))
return ABIArgInfo::getIgnore();
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
// FIXME: This is kind of nasty... but there isn't much choice because the ARM
// backend doesn't support byval.
// FIXME: This doesn't handle alignment > 64 bits.
@@ -1927,6 +2041,11 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
// Are we following APCS?
if (getABIKind() == APCS) {
if (isEmptyRecord(Context, RetTy, false))
@@ -1976,7 +2095,7 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
}
llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
+ CodeGenFunction &CGF) const {
// FIXME: Need to handle alignment
const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
@@ -1992,8 +2111,7 @@ llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
uint64_t Offset =
llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
llvm::Value *NextAddr =
- Builder.CreateGEP(Addr, llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
"ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
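The ARM va_arg step above rounds every argument slot up to a 4-byte multiple. A standalone check mirroring llvm::RoundUpToAlignment (not part of the patch):

#include <cassert>

static unsigned long long roundUp(unsigned long long V, unsigned long long A) {
  return (V + A - 1) / A * A;   // same result as llvm::RoundUpToAlignment
}

int main() {
  assert(roundUp(1, 4) == 4);   // char      -> one 4-byte slot
  assert(roundUp(4, 4) == 4);   // int       -> one slot
  assert(roundUp(8, 4) == 8);   // long long -> two slots
  return 0;
}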
@@ -2017,7 +2135,9 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
}
}
+//===----------------------------------------------------------------------===//
// SystemZ ABI Implementation
+//===----------------------------------------------------------------------===//
namespace {
@@ -2031,7 +2151,9 @@ class SystemZABIInfo : public ABIInfo {
llvm::LLVMContext &VMContext) const;
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
+ llvm::LLVMContext &VMContext,
+ const llvm::Type *const *PrefTypes,
+ unsigned NumPrefTypes) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
Context, VMContext);
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
@@ -2101,7 +2223,9 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty,
}
}
+//===----------------------------------------------------------------------===//
// MSP430 ABI Implementation
+//===----------------------------------------------------------------------===//
namespace {
@@ -2138,8 +2262,11 @@ void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
}
}
+//===----------------------------------------------------------------------===//
// MIPS ABI Implementation. This works for both little-endian and
// big-endian variants.
+//===----------------------------------------------------------------------===//
+
namespace {
class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
public:
@@ -2195,10 +2322,10 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() const {
// For now we just cache the TargetCodeGenInfo in CodeGenModule and don't
// free it.
- const llvm::Triple &Triple(getContext().Target.getTriple());
+ const llvm::Triple &Triple = getContext().Target.getTriple();
switch (Triple.getArch()) {
default:
- return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo);
+ return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo());
case llvm::Triple::mips:
case llvm::Triple::mipsel:
@@ -2247,6 +2374,7 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() const {
}
case llvm::Triple::x86_64:
- return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo());
+ return *(TheTargetCodeGenInfo =
+ new X86_64TargetCodeGenInfo(Context, TheTargetData));
}
}