Diffstat (limited to 'contrib/llvm/tools/clang/lib/CodeGen')
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h | 165
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp | 339
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp | 1267
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h | 205
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h | 28
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp | 2182
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp | 471
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCXX.h | 36
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h | 230
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp | 1397
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCall.h | 164
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp | 1363
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp | 1916
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h | 257
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp | 846
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp | 455
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp | 1574
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGException.h | 593
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp | 2108
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp | 776
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp | 1216
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp | 751
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp | 1122
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp | 2280
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp | 850
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp | 2218
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp | 6236
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h | 234
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp | 953
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h | 238
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp | 793
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp | 1211
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp | 92
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp | 488
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp | 3050
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h | 369
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGValue.h | 327
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt | 41
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp | 348
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp | 1341
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h | 1746
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp | 2162
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h | 620
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp | 516
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h | 224
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h | 121
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp | 1023
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/Makefile | 19
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp | 2515
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/Mangle.h | 177
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp | 1218
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp | 107
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/README.txt | 47
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp | 2665
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h | 108
55 files changed, 53798 insertions(+), 0 deletions(-)
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
new file mode 100644
index 0000000..91b77425
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
@@ -0,0 +1,165 @@
+//===----- ABIInfo.h - ABI information access & encapsulation ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_ABIINFO_H
+#define CLANG_CODEGEN_ABIINFO_H
+
+#include "clang/AST/Type.h"
+#include "llvm/Type.h"
+
+namespace llvm {
+ class Value;
+ class LLVMContext;
+ class TargetData;
+}
+
+namespace clang {
+ class ASTContext;
+
+ namespace CodeGen {
+ class CGFunctionInfo;
+ class CodeGenFunction;
+ class CodeGenTypes;
+ }
+
+ // FIXME: All of this stuff should be part of the target interface
+ // somehow. It is currently here because it is not clear how to factor
+ // the targets to support this, since the Targets currently live in a
+ // layer below types n'stuff.
+
+ /// ABIArgInfo - Helper class to encapsulate information about how a
+ /// specific C type should be passed to or returned from a function.
+ class ABIArgInfo {
+ public:
+ enum Kind {
+ /// Direct - Pass the argument directly using the normal converted LLVM
+ /// type, or by coercing to another specified type stored in
+    /// 'CoerceToType'. If an offset is specified (in UIntData), then the
+ /// argument passed is offset by some number of bytes in the memory
+ /// representation.
+ Direct,
+
+ /// Extend - Valid only for integer argument types. Same as 'direct'
+ /// but also emit a zero/sign extension attribute.
+ Extend,
+
+ /// Indirect - Pass the argument indirectly via a hidden pointer
+ /// with the specified alignment (0 indicates default alignment).
+ Indirect,
+
+ /// Ignore - Ignore the argument (treat as void). Useful for void and
+ /// empty structs.
+ Ignore,
+
+ /// Expand - Only valid for aggregate argument types. The structure should
+ /// be expanded into consecutive arguments for its constituent fields.
+ /// Currently expand is only allowed on structures whose fields
+ /// are all scalar types or are themselves expandable types.
+ Expand,
+
+ KindFirst=Direct, KindLast=Expand
+ };
+
+ private:
+ Kind TheKind;
+ llvm::PATypeHolder TypeData;
+ unsigned UIntData;
+ bool BoolData;
+
+ ABIArgInfo(Kind K, const llvm::Type *TD=0,
+ unsigned UI=0, bool B = false)
+ : TheKind(K), TypeData(TD), UIntData(UI), BoolData(B) {}
+
+ public:
+ ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {}
+
+ static ABIArgInfo getDirect(const llvm::Type *T = 0, unsigned Offset = 0) {
+ return ABIArgInfo(Direct, T, Offset);
+ }
+ static ABIArgInfo getExtend(const llvm::Type *T = 0) {
+ return ABIArgInfo(Extend, T, 0);
+ }
+ static ABIArgInfo getIgnore() {
+ return ABIArgInfo(Ignore);
+ }
+ static ABIArgInfo getIndirect(unsigned Alignment, bool ByVal = true) {
+ return ABIArgInfo(Indirect, 0, Alignment, ByVal);
+ }
+ static ABIArgInfo getExpand() {
+ return ABIArgInfo(Expand);
+ }
+
+ Kind getKind() const { return TheKind; }
+ bool isDirect() const { return TheKind == Direct; }
+ bool isExtend() const { return TheKind == Extend; }
+ bool isIgnore() const { return TheKind == Ignore; }
+ bool isIndirect() const { return TheKind == Indirect; }
+ bool isExpand() const { return TheKind == Expand; }
+
+ bool canHaveCoerceToType() const {
+ return TheKind == Direct || TheKind == Extend;
+ }
+
+ // Direct/Extend accessors
+ unsigned getDirectOffset() const {
+ assert((isDirect() || isExtend()) && "Not a direct or extend kind");
+ return UIntData;
+ }
+ const llvm::Type *getCoerceToType() const {
+ assert(canHaveCoerceToType() && "Invalid kind!");
+ return TypeData;
+ }
+
+ void setCoerceToType(const llvm::Type *T) {
+ assert(canHaveCoerceToType() && "Invalid kind!");
+ TypeData = T;
+ }
+
+ // Indirect accessors
+ unsigned getIndirectAlign() const {
+ assert(TheKind == Indirect && "Invalid kind!");
+ return UIntData;
+ }
+
+ bool getIndirectByVal() const {
+ assert(TheKind == Indirect && "Invalid kind!");
+ return BoolData;
+ }
+
+ void dump() const;
+ };
+
+ /// ABIInfo - Target specific hooks for defining how a type should be
+ /// passed or returned from functions.
+ class ABIInfo {
+ public:
+ CodeGen::CodeGenTypes &CGT;
+
+ ABIInfo(CodeGen::CodeGenTypes &cgt) : CGT(cgt) {}
+ virtual ~ABIInfo();
+
+ ASTContext &getContext() const;
+ llvm::LLVMContext &getVMContext() const;
+ const llvm::TargetData &getTargetData() const;
+
+ virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const = 0;
+
+ /// EmitVAArg - Emit the target dependent code to load a value of
+ /// \arg Ty from the va_list pointed to by \arg VAListAddr.
+
+ // FIXME: This is a gaping layering violation if we wanted to drop
+ // the ABI information any lower than CodeGen. Of course, for
+ // VAArg handling it has to be at this level; there is no way to
+ // abstract this out.
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGen::CodeGenFunction &CGF) const = 0;
+ };
+} // end namespace clang
+
+#endif
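
Note on ABIInfo.h: the ABIArgInfo factory functions (getDirect, getExtend, getIndirect, getIgnore, getExpand) are the vocabulary the per-target ABI code uses to say how each C type is passed. A minimal sketch of how a hypothetical target might combine them follows; this is illustrative only, not the logic in TargetInfo.cpp, and HypotheticalABIInfo/classifyType are invented names.

// Illustrative sketch: a hypothetical target's ABIInfo subclass, assuming
// the interface declared in ABIInfo.h above.
#include "ABIInfo.h"

namespace {
class HypotheticalABIInfo : public clang::ABIInfo {
public:
  HypotheticalABIInfo(clang::CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  // Decide how a single type is passed or returned.
  clang::ABIArgInfo classifyType(clang::QualType Ty) const {
    if (Ty->isVoidType())
      return clang::ABIArgInfo::getIgnore();     // nothing to pass
    if (Ty->isPromotableIntegerType())
      return clang::ABIArgInfo::getExtend();     // widen small integers
    if (Ty->isRecordType())
      return clang::ABIArgInfo::getIndirect(0);  // hidden byval pointer
    return clang::ABIArgInfo::getDirect();       // pass as-is
  }

  virtual void computeInfo(clang::CodeGen::CGFunctionInfo &FI) const {
    // A real target would walk the return type and each argument in FI here
    // and record the ABIArgInfo returned by classifyType for each of them.
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, clang::QualType Ty,
                                 clang::CodeGen::CodeGenFunction &CGF) const {
    return 0; // va_arg lowering omitted in this sketch
  }
};
} // end anonymous namespace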
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
new file mode 100644
index 0000000..69efe43
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
@@ -0,0 +1,339 @@
+//===--- BackendUtil.cpp - LLVM Backend Utilities -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/BackendUtil.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/TargetOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/Assembly/PrintModulePass.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/StandardPasses.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/SubtargetFeature.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetRegistry.h"
+using namespace clang;
+using namespace llvm;
+
+namespace {
+
+class EmitAssemblyHelper {
+ Diagnostic &Diags;
+ const CodeGenOptions &CodeGenOpts;
+ const TargetOptions &TargetOpts;
+ Module *TheModule;
+
+ Timer CodeGenerationTime;
+
+ mutable FunctionPassManager *CodeGenPasses;
+ mutable PassManager *PerModulePasses;
+ mutable FunctionPassManager *PerFunctionPasses;
+
+private:
+ FunctionPassManager *getCodeGenPasses() const {
+ if (!CodeGenPasses) {
+ CodeGenPasses = new FunctionPassManager(TheModule);
+ CodeGenPasses->add(new TargetData(TheModule));
+ }
+ return CodeGenPasses;
+ }
+
+ PassManager *getPerModulePasses() const {
+ if (!PerModulePasses) {
+ PerModulePasses = new PassManager();
+ PerModulePasses->add(new TargetData(TheModule));
+ }
+ return PerModulePasses;
+ }
+
+ FunctionPassManager *getPerFunctionPasses() const {
+ if (!PerFunctionPasses) {
+ PerFunctionPasses = new FunctionPassManager(TheModule);
+ PerFunctionPasses->add(new TargetData(TheModule));
+ }
+ return PerFunctionPasses;
+ }
+
+ void CreatePasses();
+
+ /// AddEmitPasses - Add passes necessary to emit assembly or LLVM IR.
+ ///
+ /// \return True on success.
+ bool AddEmitPasses(BackendAction Action, formatted_raw_ostream &OS);
+
+public:
+ EmitAssemblyHelper(Diagnostic &_Diags,
+ const CodeGenOptions &CGOpts, const TargetOptions &TOpts,
+ Module *M)
+ : Diags(_Diags), CodeGenOpts(CGOpts), TargetOpts(TOpts),
+ TheModule(M), CodeGenerationTime("Code Generation Time"),
+ CodeGenPasses(0), PerModulePasses(0), PerFunctionPasses(0) {}
+
+ ~EmitAssemblyHelper() {
+ delete CodeGenPasses;
+ delete PerModulePasses;
+ delete PerFunctionPasses;
+ }
+
+ void EmitAssembly(BackendAction Action, raw_ostream *OS);
+};
+
+}
+
+void EmitAssemblyHelper::CreatePasses() {
+ unsigned OptLevel = CodeGenOpts.OptimizationLevel;
+ CodeGenOptions::InliningMethod Inlining = CodeGenOpts.Inlining;
+
+ // Handle disabling of LLVM optimization, where we want to preserve the
+ // internal module before any optimization.
+ if (CodeGenOpts.DisableLLVMOpts) {
+ OptLevel = 0;
+ Inlining = CodeGenOpts.NoInlining;
+ }
+
+ // In -O0 if checking is disabled, we don't even have per-function passes.
+ if (CodeGenOpts.VerifyModule)
+ getPerFunctionPasses()->add(createVerifierPass());
+
+ // Assume that standard function passes aren't run for -O0.
+ if (OptLevel > 0)
+ llvm::createStandardFunctionPasses(getPerFunctionPasses(), OptLevel);
+
+ llvm::Pass *InliningPass = 0;
+ switch (Inlining) {
+ case CodeGenOptions::NoInlining: break;
+ case CodeGenOptions::NormalInlining: {
+ // Set the inline threshold following llvm-gcc.
+ //
+ // FIXME: Derive these constants in a principled fashion.
+ unsigned Threshold = 225;
+ if (CodeGenOpts.OptimizeSize)
+ Threshold = 75;
+ else if (OptLevel > 2)
+ Threshold = 275;
+ InliningPass = createFunctionInliningPass(Threshold);
+ break;
+ }
+ case CodeGenOptions::OnlyAlwaysInlining:
+ InliningPass = createAlwaysInlinerPass(); // Respect always_inline
+ break;
+ }
+
+ // For now we always create per module passes.
+ llvm::createStandardModulePasses(getPerModulePasses(), OptLevel,
+ CodeGenOpts.OptimizeSize,
+ CodeGenOpts.UnitAtATime,
+ CodeGenOpts.UnrollLoops,
+ CodeGenOpts.SimplifyLibCalls,
+ /*HaveExceptions=*/true,
+ InliningPass);
+}
+
+bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
+ formatted_raw_ostream &OS) {
+ // Create the TargetMachine for generating code.
+ std::string Error;
+ std::string Triple = TheModule->getTargetTriple();
+ const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error);
+ if (!TheTarget) {
+ Diags.Report(diag::err_fe_unable_to_create_target) << Error;
+ return false;
+ }
+
+ // FIXME: Expose these capabilities via actual APIs!!!! Aside from just
+ // being gross, this is also totally broken if we ever care about
+ // concurrency.
+
+ // Set frame pointer elimination mode.
+ if (!CodeGenOpts.DisableFPElim) {
+ llvm::NoFramePointerElim = false;
+ llvm::NoFramePointerElimNonLeaf = false;
+ } else if (CodeGenOpts.OmitLeafFramePointer) {
+ llvm::NoFramePointerElim = false;
+ llvm::NoFramePointerElimNonLeaf = true;
+ } else {
+ llvm::NoFramePointerElim = true;
+ llvm::NoFramePointerElimNonLeaf = true;
+ }
+
+ // Set float ABI type.
+ if (CodeGenOpts.FloatABI == "soft")
+ llvm::FloatABIType = llvm::FloatABI::Soft;
+ else if (CodeGenOpts.FloatABI == "hard")
+ llvm::FloatABIType = llvm::FloatABI::Hard;
+ else {
+ assert(CodeGenOpts.FloatABI.empty() && "Invalid float abi!");
+ llvm::FloatABIType = llvm::FloatABI::Default;
+ }
+
+ NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
+ llvm::UseSoftFloat = CodeGenOpts.SoftFloat;
+ UnwindTablesMandatory = CodeGenOpts.UnwindTables;
+
+ TargetMachine::setAsmVerbosityDefault(CodeGenOpts.AsmVerbose);
+
+ TargetMachine::setFunctionSections(CodeGenOpts.FunctionSections);
+ TargetMachine::setDataSections (CodeGenOpts.DataSections);
+
+ // FIXME: Parse this earlier.
+ if (CodeGenOpts.RelocationModel == "static") {
+ TargetMachine::setRelocationModel(llvm::Reloc::Static);
+ } else if (CodeGenOpts.RelocationModel == "pic") {
+ TargetMachine::setRelocationModel(llvm::Reloc::PIC_);
+ } else {
+ assert(CodeGenOpts.RelocationModel == "dynamic-no-pic" &&
+ "Invalid PIC model!");
+ TargetMachine::setRelocationModel(llvm::Reloc::DynamicNoPIC);
+ }
+ // FIXME: Parse this earlier.
+ if (CodeGenOpts.CodeModel == "small") {
+ TargetMachine::setCodeModel(llvm::CodeModel::Small);
+ } else if (CodeGenOpts.CodeModel == "kernel") {
+ TargetMachine::setCodeModel(llvm::CodeModel::Kernel);
+ } else if (CodeGenOpts.CodeModel == "medium") {
+ TargetMachine::setCodeModel(llvm::CodeModel::Medium);
+ } else if (CodeGenOpts.CodeModel == "large") {
+ TargetMachine::setCodeModel(llvm::CodeModel::Large);
+ } else {
+ assert(CodeGenOpts.CodeModel.empty() && "Invalid code model!");
+ TargetMachine::setCodeModel(llvm::CodeModel::Default);
+ }
+
+ std::vector<const char *> BackendArgs;
+ BackendArgs.push_back("clang"); // Fake program name.
+ if (!CodeGenOpts.DebugPass.empty()) {
+ BackendArgs.push_back("-debug-pass");
+ BackendArgs.push_back(CodeGenOpts.DebugPass.c_str());
+ }
+ if (!CodeGenOpts.LimitFloatPrecision.empty()) {
+ BackendArgs.push_back("-limit-float-precision");
+ BackendArgs.push_back(CodeGenOpts.LimitFloatPrecision.c_str());
+ }
+ if (llvm::TimePassesIsEnabled)
+ BackendArgs.push_back("-time-passes");
+ BackendArgs.push_back(0);
+ llvm::cl::ParseCommandLineOptions(BackendArgs.size() - 1,
+ const_cast<char **>(&BackendArgs[0]));
+
+ std::string FeaturesStr;
+ if (TargetOpts.CPU.size() || TargetOpts.Features.size()) {
+ SubtargetFeatures Features;
+ Features.setCPU(TargetOpts.CPU);
+ for (std::vector<std::string>::const_iterator
+ it = TargetOpts.Features.begin(),
+ ie = TargetOpts.Features.end(); it != ie; ++it)
+ Features.AddFeature(*it);
+ FeaturesStr = Features.getString();
+ }
+ TargetMachine *TM = TheTarget->createTargetMachine(Triple, FeaturesStr);
+
+ if (CodeGenOpts.RelaxAll)
+ TM->setMCRelaxAll(true);
+
+ // Create the code generator passes.
+ FunctionPassManager *PM = getCodeGenPasses();
+ CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
+
+ switch (CodeGenOpts.OptimizationLevel) {
+ default: break;
+ case 0: OptLevel = CodeGenOpt::None; break;
+ case 3: OptLevel = CodeGenOpt::Aggressive; break;
+ }
+
+ // Normal mode, emit a .s or .o file by running the code generator. Note,
+  // this also adds code generator level optimization passes.
+ TargetMachine::CodeGenFileType CGFT = TargetMachine::CGFT_AssemblyFile;
+ if (Action == Backend_EmitObj)
+ CGFT = TargetMachine::CGFT_ObjectFile;
+ else if (Action == Backend_EmitMCNull)
+ CGFT = TargetMachine::CGFT_Null;
+ else
+ assert(Action == Backend_EmitAssembly && "Invalid action!");
+ if (TM->addPassesToEmitFile(*PM, OS, CGFT, OptLevel,
+ /*DisableVerify=*/!CodeGenOpts.VerifyModule)) {
+ Diags.Report(diag::err_fe_unable_to_interface_with_target);
+ return false;
+ }
+
+ return true;
+}
+
+void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_ostream *OS) {
+ TimeRegion Region(llvm::TimePassesIsEnabled ? &CodeGenerationTime : 0);
+ llvm::formatted_raw_ostream FormattedOS;
+
+ CreatePasses();
+ switch (Action) {
+ case Backend_EmitNothing:
+ break;
+
+ case Backend_EmitBC:
+ getPerModulePasses()->add(createBitcodeWriterPass(*OS));
+ break;
+
+ case Backend_EmitLL:
+ FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM);
+ getPerModulePasses()->add(createPrintModulePass(&FormattedOS));
+ break;
+
+ default:
+ FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM);
+ if (!AddEmitPasses(Action, FormattedOS))
+ return;
+ }
+
+ // Run passes. For now we do all passes at once, but eventually we
+ // would like to have the option of streaming code generation.
+
+ if (PerFunctionPasses) {
+ PrettyStackTraceString CrashInfo("Per-function optimization");
+
+ PerFunctionPasses->doInitialization();
+ for (Module::iterator I = TheModule->begin(),
+ E = TheModule->end(); I != E; ++I)
+ if (!I->isDeclaration())
+ PerFunctionPasses->run(*I);
+ PerFunctionPasses->doFinalization();
+ }
+
+ if (PerModulePasses) {
+ PrettyStackTraceString CrashInfo("Per-module optimization passes");
+ PerModulePasses->run(*TheModule);
+ }
+
+ if (CodeGenPasses) {
+ PrettyStackTraceString CrashInfo("Code generation");
+
+ CodeGenPasses->doInitialization();
+ for (Module::iterator I = TheModule->begin(),
+ E = TheModule->end(); I != E; ++I)
+ if (!I->isDeclaration())
+ CodeGenPasses->run(*I);
+ CodeGenPasses->doFinalization();
+ }
+}
+
+void clang::EmitBackendOutput(Diagnostic &Diags, const CodeGenOptions &CGOpts,
+ const TargetOptions &TOpts, Module *M,
+ BackendAction Action, raw_ostream *OS) {
+ EmitAssemblyHelper AsmHelper(Diags, CGOpts, TOpts, M);
+
+ AsmHelper.EmitAssembly(Action, OS);
+}
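
Note on BackendUtil.cpp: clang::EmitBackendOutput is the single entry point the frontend uses to run the lazily created per-function, per-module, and codegen pass managers over a finished module. A minimal, hypothetical caller is sketched below; the real drivers live in CodeGenAction.cpp, and EmitForModule is an invented name for this example.

// Illustrative sketch: driving the backend entry point declared above.
#include "clang/CodeGen/BackendUtil.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/TargetOptions.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Module.h"
#include "llvm/Support/raw_ostream.h"

static void EmitForModule(clang::Diagnostic &Diags,
                          const clang::CodeGenOptions &CGOpts,
                          const clang::TargetOptions &TOpts,
                          llvm::Module *M,
                          llvm::raw_ostream *OS) {
  // Backend_EmitAssembly writes a .s file via AddEmitPasses; the other
  // actions used above select object code (Backend_EmitObj), bitcode
  // (Backend_EmitBC), textual IR (Backend_EmitLL), or nothing at all.
  clang::EmitBackendOutput(Diags, CGOpts, TOpts, M,
                           clang::Backend_EmitAssembly, OS);
}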
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
new file mode 100644
index 0000000..04f1ef2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
@@ -0,0 +1,1267 @@
+//===--- CGBlocks.cpp - Emit LLVM Code for blocks -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CGObjCRuntime.h"
+#include "CodeGenModule.h"
+#include "clang/AST/DeclObjC.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/Target/TargetData.h"
+#include <algorithm>
+
+using namespace clang;
+using namespace CodeGen;
+
+CGBlockInfo::CGBlockInfo(const char *N)
+ : Name(N), CXXThisRef(0), NeedsObjCSelf(false) {
+
+ // Skip asm prefix, if any.
+ if (Name && Name[0] == '\01')
+ ++Name;
+}
+
+
+llvm::Constant *CodeGenFunction::
+BuildDescriptorBlockDecl(const BlockExpr *BE, const CGBlockInfo &Info,
+ const llvm::StructType* Ty,
+ llvm::Constant *BlockVarLayout,
+ std::vector<HelperInfo> *NoteForHelper) {
+ bool BlockHasCopyDispose = Info.BlockHasCopyDispose;
+ CharUnits Size = Info.BlockSize;
+ const llvm::Type *UnsignedLongTy
+ = CGM.getTypes().ConvertType(getContext().UnsignedLongTy);
+ llvm::Constant *C;
+ std::vector<llvm::Constant*> Elts;
+
+ // reserved
+ C = llvm::ConstantInt::get(UnsignedLongTy, 0);
+ Elts.push_back(C);
+
+ // Size
+ // FIXME: What is the right way to say this doesn't fit? We should give
+ // a user diagnostic in that case. Better fix would be to change the
+ // API to size_t.
+ C = llvm::ConstantInt::get(UnsignedLongTy, Size.getQuantity());
+ Elts.push_back(C);
+
+ // optional copy/dispose helpers
+ if (BlockHasCopyDispose) {
+ // copy_func_helper_decl
+ Elts.push_back(BuildCopyHelper(Ty, NoteForHelper));
+
+ // destroy_func_decl
+ Elts.push_back(BuildDestroyHelper(Ty, NoteForHelper));
+ }
+
+ // Signature. non-optional ObjC-style method descriptor @encode sequence
+ std::string BlockTypeEncoding;
+ CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);
+
+ Elts.push_back(llvm::ConstantExpr::getBitCast(
+ CGM.GetAddrOfConstantCString(BlockTypeEncoding), PtrToInt8Ty));
+
+ // Layout.
+ C = BlockVarLayout;
+
+ Elts.push_back(C);
+
+ C = llvm::ConstantStruct::get(VMContext, Elts, false);
+
+ C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
+ llvm::GlobalValue::InternalLinkage,
+ C, "__block_descriptor_tmp");
+ return C;
+}
+
+static void CollectBlockDeclRefInfo(const Stmt *S, CGBlockInfo &Info) {
+ for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+ I != E; ++I)
+ if (*I)
+ CollectBlockDeclRefInfo(*I, Info);
+
+ // We want to ensure we walk down into block literals so we can find
+ // all nested BlockDeclRefExprs.
+ if (const BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
+ Info.InnerBlocks.insert(BE->getBlockDecl());
+ CollectBlockDeclRefInfo(BE->getBody(), Info);
+ }
+
+ else if (const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(S)) {
+ const ValueDecl *D = BDRE->getDecl();
+ // FIXME: Handle enums.
+ if (isa<FunctionDecl>(D))
+ return;
+
+ if (isa<ImplicitParamDecl>(D) &&
+ isa<ObjCMethodDecl>(D->getDeclContext()) &&
+ cast<ObjCMethodDecl>(D->getDeclContext())->getSelfDecl() == D) {
+ Info.NeedsObjCSelf = true;
+ return;
+ }
+
+ // Only Decls that escape are added.
+ if (!Info.InnerBlocks.count(D->getDeclContext()))
+ Info.DeclRefs.push_back(BDRE);
+ }
+
+ // Make sure to capture implicit 'self' references due to super calls.
+ else if (const ObjCMessageExpr *E = dyn_cast<ObjCMessageExpr>(S)) {
+ if (E->getReceiverKind() == ObjCMessageExpr::SuperClass ||
+ E->getReceiverKind() == ObjCMessageExpr::SuperInstance)
+ Info.NeedsObjCSelf = true;
+ }
+
+ // Getter/setter uses may also cause implicit super references,
+ // which we can check for with:
+ else if (isa<ObjCSuperExpr>(S))
+ Info.NeedsObjCSelf = true;
+
+ else if (isa<CXXThisExpr>(S))
+ Info.CXXThisRef = cast<CXXThisExpr>(S);
+}
+
+/// CanBlockBeGlobal - Given a CGBlockInfo struct, determines if a block can be
+/// declared as a global variable instead of on the stack.
+static bool CanBlockBeGlobal(const CGBlockInfo &Info) {
+ return Info.DeclRefs.empty();
+}
+
+/// AllocateAllBlockDeclRefs - Preallocate all nested BlockDeclRefExprs to
+/// ensure we can generate the debug information for the parameter of the
+/// block invoke function.
+static void AllocateAllBlockDeclRefs(CodeGenFunction &CGF, CGBlockInfo &Info) {
+ if (Info.CXXThisRef)
+ CGF.AllocateBlockCXXThisPointer(Info.CXXThisRef);
+
+ for (size_t i = 0; i < Info.DeclRefs.size(); ++i)
+ CGF.AllocateBlockDecl(Info.DeclRefs[i]);
+
+ if (Info.NeedsObjCSelf) {
+ ValueDecl *Self = cast<ObjCMethodDecl>(CGF.CurFuncDecl)->getSelfDecl();
+ BlockDeclRefExpr *BDRE =
+ new (CGF.getContext()) BlockDeclRefExpr(Self, Self->getType(),
+ SourceLocation(), false);
+ Info.DeclRefs.push_back(BDRE);
+ CGF.AllocateBlockDecl(BDRE);
+ }
+}
+
+static unsigned computeBlockFlag(CodeGenModule &CGM,
+ const BlockExpr *BE, unsigned flags) {
+ QualType BPT = BE->getType();
+ const FunctionType *ftype = BPT->getPointeeType()->getAs<FunctionType>();
+ QualType ResultType = ftype->getResultType();
+
+ CallArgList Args;
+ CodeGenTypes &Types = CGM.getTypes();
+ const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, Args,
+ FunctionType::ExtInfo());
+ if (CGM.ReturnTypeUsesSRet(FnInfo))
+ flags |= CodeGenFunction::BLOCK_USE_STRET;
+ return flags;
+}
+
+// FIXME: Push most into CGM, passing down a few bits, like current function
+// name.
+llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
+ std::string Name = CurFn->getName();
+ CGBlockInfo Info(Name.c_str());
+ Info.InnerBlocks.insert(BE->getBlockDecl());
+ CollectBlockDeclRefInfo(BE->getBody(), Info);
+
+ // Check if the block can be global.
+ // FIXME: This test doesn't work for nested blocks yet. Longer term, I'd like
+ // to just have one code path. We should move this function into CGM and pass
+ // CGF, then we can just check to see if CGF is 0.
+ if (0 && CanBlockBeGlobal(Info))
+ return CGM.GetAddrOfGlobalBlock(BE, Name.c_str());
+
+ size_t BlockFields = 5;
+
+ std::vector<llvm::Constant*> Elts(BlockFields);
+
+ llvm::Constant *C;
+ llvm::Value *V;
+
+ {
+ llvm::Constant *BlockVarLayout;
+ // C = BuildBlockStructInitlist();
+ unsigned int flags = BLOCK_HAS_SIGNATURE;
+
+ // We run this first so that we set BlockHasCopyDispose from the entire
+ // block literal.
+ // __invoke
+ llvm::Function *Fn
+ = CodeGenFunction(CGM).GenerateBlockFunction(CurGD, BE, Info, CurFuncDecl,
+ BlockVarLayout,
+ LocalDeclMap);
+ BlockHasCopyDispose |= Info.BlockHasCopyDispose;
+ Elts[3] = Fn;
+
+    // FIXME: Don't use BlockHasCopyDispose, it is set more often than
+ // necessary, for example: { ^{ __block int i; ^{ i = 1; }(); }(); }
+ if (Info.BlockHasCopyDispose)
+ flags |= BLOCK_HAS_COPY_DISPOSE;
+
+ // __isa
+ C = CGM.getNSConcreteStackBlock();
+ C = llvm::ConstantExpr::getBitCast(C, PtrToInt8Ty);
+ Elts[0] = C;
+
+ // __flags
+ flags = computeBlockFlag(CGM, BE, flags);
+ const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
+ CGM.getTypes().ConvertType(CGM.getContext().IntTy));
+ C = llvm::ConstantInt::get(IntTy, flags);
+ Elts[1] = C;
+
+ // __reserved
+ C = llvm::ConstantInt::get(IntTy, 0);
+ Elts[2] = C;
+
+ if (Info.BlockLayout.empty()) {
+ // __descriptor
+ C = llvm::Constant::getNullValue(PtrToInt8Ty);
+ Elts[4] = BuildDescriptorBlockDecl(BE, Info, 0, C, 0);
+
+ // Optimize to being a global block.
+ Elts[0] = CGM.getNSConcreteGlobalBlock();
+
+ Elts[1] = llvm::ConstantInt::get(IntTy, flags|BLOCK_IS_GLOBAL);
+
+ C = llvm::ConstantStruct::get(VMContext, Elts, false);
+
+ C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
+ llvm::GlobalValue::InternalLinkage, C,
+ "__block_holder_tmp_" +
+ llvm::Twine(CGM.getGlobalUniqueCount()));
+ QualType BPT = BE->getType();
+ C = llvm::ConstantExpr::getBitCast(C, ConvertType(BPT));
+ return C;
+ }
+
+ std::vector<const llvm::Type *> Types(BlockFields+Info.BlockLayout.size());
+ for (int i=0; i<4; ++i)
+ Types[i] = Elts[i]->getType();
+ Types[4] = PtrToInt8Ty;
+
+ for (unsigned i = 0, n = Info.BlockLayout.size(); i != n; ++i) {
+ const Expr *E = Info.BlockLayout[i];
+ const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
+ QualType Ty = E->getType();
+ if (BDRE && BDRE->isByRef()) {
+ Types[i+BlockFields] =
+ llvm::PointerType::get(BuildByRefType(BDRE->getDecl()), 0);
+ } else if (BDRE && BDRE->getDecl()->getType()->isReferenceType()) {
+ Types[i+BlockFields] = llvm::PointerType::get(ConvertType(Ty), 0);
+ } else
+ Types[i+BlockFields] = ConvertType(Ty);
+ }
+
+ llvm::StructType *Ty = llvm::StructType::get(VMContext, Types, true);
+
+ llvm::AllocaInst *A = CreateTempAlloca(Ty);
+ A->setAlignment(Info.BlockAlign.getQuantity());
+ V = A;
+
+ // Build layout / cleanup information for all the data entries in the
+ // layout, and write the enclosing fields into the type.
+ std::vector<HelperInfo> NoteForHelper(Info.BlockLayout.size());
+ unsigned NumHelpers = 0;
+
+ for (unsigned i=0; i<4; ++i)
+ Builder.CreateStore(Elts[i], Builder.CreateStructGEP(V, i, "block.tmp"));
+
+ for (unsigned i=0; i < Info.BlockLayout.size(); ++i) {
+ const Expr *E = Info.BlockLayout[i];
+
+ // Skip padding.
+ if (isa<DeclRefExpr>(E)) continue;
+
+ llvm::Value* Addr = Builder.CreateStructGEP(V, i+BlockFields, "tmp");
+ HelperInfo &Note = NoteForHelper[NumHelpers++];
+
+ Note.index = i+5;
+
+ if (isa<CXXThisExpr>(E)) {
+ Note.RequiresCopying = false;
+ Note.flag = BLOCK_FIELD_IS_OBJECT;
+
+ Builder.CreateStore(LoadCXXThis(), Addr);
+ continue;
+ }
+
+ const BlockDeclRefExpr *BDRE = cast<BlockDeclRefExpr>(E);
+ const ValueDecl *VD = BDRE->getDecl();
+ QualType T = VD->getType();
+
+ Note.RequiresCopying = BlockRequiresCopying(T);
+
+ if (BDRE->isByRef()) {
+ Note.flag = BLOCK_FIELD_IS_BYREF;
+ if (T.isObjCGCWeak())
+ Note.flag |= BLOCK_FIELD_IS_WEAK;
+ } else if (T->isBlockPointerType()) {
+ Note.flag = BLOCK_FIELD_IS_BLOCK;
+ } else {
+ Note.flag = BLOCK_FIELD_IS_OBJECT;
+ }
+
+ if (LocalDeclMap[VD]) {
+ if (BDRE->isByRef()) {
+ llvm::Value *Loc = LocalDeclMap[VD];
+ Loc = Builder.CreateStructGEP(Loc, 1, "forwarding");
+ Loc = Builder.CreateLoad(Loc);
+ Builder.CreateStore(Loc, Addr);
+ continue;
+ } else {
+ if (BDRE->getCopyConstructorExpr()) {
+ E = BDRE->getCopyConstructorExpr();
+ PushDestructorCleanup(E->getType(), Addr);
+ }
+ else {
+ E = new (getContext()) DeclRefExpr(const_cast<ValueDecl*>(VD),
+ VD->getType().getNonReferenceType(),
+ SourceLocation());
+ if (VD->getType()->isReferenceType()) {
+ E = new (getContext())
+ UnaryOperator(const_cast<Expr*>(E), UO_AddrOf,
+ getContext().getPointerType(E->getType()),
+ SourceLocation());
+ }
+ }
+ }
+ }
+
+ if (BDRE->isByRef()) {
+ E = new (getContext())
+ UnaryOperator(const_cast<Expr*>(E), UO_AddrOf,
+ getContext().getPointerType(E->getType()),
+ SourceLocation());
+ }
+
+ RValue r = EmitAnyExpr(E, Addr, false);
+ if (r.isScalar()) {
+ llvm::Value *Loc = r.getScalarVal();
+ const llvm::Type *Ty = Types[i+BlockFields];
+ if (BDRE->isByRef()) {
+        // E is now the address of the value field; instead, we want the
+ // address of the actual ByRef struct. We optimize this slightly
+ // compared to gcc by not grabbing the forwarding slot as this must
+ // be done during Block_copy for us, and we can postpone the work
+ // until then.
+ CharUnits offset = BlockDecls[BDRE->getDecl()];
+
+ llvm::Value *BlockLiteral = LoadBlockStruct();
+
+ Loc = Builder.CreateGEP(BlockLiteral,
+ llvm::ConstantInt::get(Int64Ty, offset.getQuantity()),
+ "block.literal");
+ Ty = llvm::PointerType::get(Ty, 0);
+ Loc = Builder.CreateBitCast(Loc, Ty);
+ Loc = Builder.CreateLoad(Loc);
+ // Loc = Builder.CreateBitCast(Loc, Ty);
+ }
+ Builder.CreateStore(Loc, Addr);
+ } else if (r.isComplex())
+ // FIXME: implement
+ ErrorUnsupported(BE, "complex in block literal");
+ else if (r.isAggregate())
+ ; // Already created into the destination
+ else
+ assert (0 && "bad block variable");
+ // FIXME: Ensure that the offset created by the backend for
+ // the struct matches the previously computed offset in BlockDecls.
+ }
+ NoteForHelper.resize(NumHelpers);
+
+ // __descriptor
+ llvm::Value *Descriptor = BuildDescriptorBlockDecl(BE, Info, Ty,
+ BlockVarLayout,
+ &NoteForHelper);
+ Descriptor = Builder.CreateBitCast(Descriptor, PtrToInt8Ty);
+ Builder.CreateStore(Descriptor, Builder.CreateStructGEP(V, 4, "block.tmp"));
+ }
+
+ QualType BPT = BE->getType();
+ V = Builder.CreateBitCast(V, ConvertType(BPT));
+  // See if this is a __weak block variable and we must call objc_read_weak
+ // on it.
+ const FunctionType *ftype = BPT->getPointeeType()->getAs<FunctionType>();
+ QualType RES = ftype->getResultType();
+ if (RES.isObjCGCWeak()) {
+ // Must cast argument to id*
+ const llvm::Type *ObjectPtrTy =
+ ConvertType(CGM.getContext().getObjCIdType());
+ const llvm::Type *PtrObjectPtrTy =
+ llvm::PointerType::getUnqual(ObjectPtrTy);
+ V = Builder.CreateBitCast(V, PtrObjectPtrTy);
+ V = CGM.getObjCRuntime().EmitObjCWeakRead(*this, V);
+ }
+ return V;
+}
+
+
+const llvm::Type *BlockModule::getBlockDescriptorType() {
+ if (BlockDescriptorType)
+ return BlockDescriptorType;
+
+ const llvm::Type *UnsignedLongTy =
+ getTypes().ConvertType(getContext().UnsignedLongTy);
+
+ // struct __block_descriptor {
+ // unsigned long reserved;
+ // unsigned long block_size;
+ //
+ // // later, the following will be added
+ //
+ // struct {
+ // void (*copyHelper)();
+ // void (*copyHelper)();
+ // } helpers; // !!! optional
+ //
+ // const char *signature; // the block signature
+ // const char *layout; // reserved
+ // };
+ BlockDescriptorType = llvm::StructType::get(UnsignedLongTy->getContext(),
+ UnsignedLongTy,
+ UnsignedLongTy,
+ NULL);
+
+ getModule().addTypeName("struct.__block_descriptor",
+ BlockDescriptorType);
+
+ return BlockDescriptorType;
+}
+
+const llvm::Type *BlockModule::getGenericBlockLiteralType() {
+ if (GenericBlockLiteralType)
+ return GenericBlockLiteralType;
+
+ const llvm::Type *BlockDescPtrTy =
+ llvm::PointerType::getUnqual(getBlockDescriptorType());
+
+ const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
+ getTypes().ConvertType(getContext().IntTy));
+
+ // struct __block_literal_generic {
+ // void *__isa;
+ // int __flags;
+ // int __reserved;
+ // void (*__invoke)(void *);
+ // struct __block_descriptor *__descriptor;
+ // };
+ GenericBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
+ PtrToInt8Ty,
+ IntTy,
+ IntTy,
+ PtrToInt8Ty,
+ BlockDescPtrTy,
+ NULL);
+
+ getModule().addTypeName("struct.__block_literal_generic",
+ GenericBlockLiteralType);
+
+ return GenericBlockLiteralType;
+}
+
+
+RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
+ ReturnValueSlot ReturnValue) {
+ const BlockPointerType *BPT =
+ E->getCallee()->getType()->getAs<BlockPointerType>();
+
+ llvm::Value *Callee = EmitScalarExpr(E->getCallee());
+
+ // Get a pointer to the generic block literal.
+ const llvm::Type *BlockLiteralTy =
+ llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType());
+
+ // Bitcast the callee to a block literal.
+ llvm::Value *BlockLiteral =
+ Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal");
+
+ // Get the function pointer from the literal.
+ llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3, "tmp");
+
+ BlockLiteral =
+ Builder.CreateBitCast(BlockLiteral,
+ llvm::Type::getInt8PtrTy(VMContext),
+ "tmp");
+
+ // Add the block literal.
+ QualType VoidPtrTy = getContext().getPointerType(getContext().VoidTy);
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(BlockLiteral), VoidPtrTy));
+
+ QualType FnType = BPT->getPointeeType();
+
+ // And the rest of the arguments.
+ EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(),
+ E->arg_begin(), E->arg_end());
+
+ // Load the function.
+ llvm::Value *Func = Builder.CreateLoad(FuncPtr, "tmp");
+
+ const FunctionType *FuncTy = FnType->getAs<FunctionType>();
+ QualType ResultType = FuncTy->getResultType();
+
+ const CGFunctionInfo &FnInfo =
+ CGM.getTypes().getFunctionInfo(ResultType, Args,
+ FuncTy->getExtInfo());
+
+ // Cast the function pointer to the right type.
+ const llvm::Type *BlockFTy =
+ CGM.getTypes().GetFunctionType(FnInfo, false);
+
+ const llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
+ Func = Builder.CreateBitCast(Func, BlockFTyPtr);
+
+ // And call the block.
+ return EmitCall(FnInfo, Func, ReturnValue, Args);
+}
+
+void CodeGenFunction::AllocateBlockCXXThisPointer(const CXXThisExpr *E) {
+ assert(BlockCXXThisOffset.isZero() && "already computed 'this' pointer");
+
+ // Figure out what the offset is.
+ QualType T = E->getType();
+ std::pair<CharUnits,CharUnits> TypeInfo = getContext().getTypeInfoInChars(T);
+ CharUnits Offset = getBlockOffset(TypeInfo.first, TypeInfo.second);
+
+ BlockCXXThisOffset = Offset;
+ BlockLayout.push_back(E);
+}
+
+void CodeGenFunction::AllocateBlockDecl(const BlockDeclRefExpr *E) {
+ const ValueDecl *VD = E->getDecl();
+ CharUnits &Offset = BlockDecls[VD];
+
+ // See if we have already allocated an offset for this variable.
+ if (!Offset.isZero())
+ return;
+
+ // Don't run the expensive check, unless we have to.
+ if (!BlockHasCopyDispose)
+ if (E->isByRef()
+ || BlockRequiresCopying(E->getType()))
+ BlockHasCopyDispose = true;
+
+ const ValueDecl *D = cast<ValueDecl>(E->getDecl());
+
+ CharUnits Size;
+ CharUnits Align;
+
+ if (E->isByRef()) {
+ llvm::tie(Size,Align) =
+ getContext().getTypeInfoInChars(getContext().VoidPtrTy);
+ } else {
+ Size = getContext().getTypeSizeInChars(D->getType());
+ Align = getContext().getDeclAlign(D);
+ }
+
+ Offset = getBlockOffset(Size, Align);
+ BlockLayout.push_back(E);
+}
+
+llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const ValueDecl *VD,
+ bool IsByRef) {
+
+ CharUnits offset = BlockDecls[VD];
+ assert(!offset.isZero() && "getting address of unallocated decl");
+
+ llvm::Value *BlockLiteral = LoadBlockStruct();
+ llvm::Value *V = Builder.CreateGEP(BlockLiteral,
+ llvm::ConstantInt::get(Int64Ty, offset.getQuantity()),
+ "block.literal");
+ if (IsByRef) {
+ const llvm::Type *PtrStructTy
+ = llvm::PointerType::get(BuildByRefType(VD), 0);
+ // The block literal will need a copy/destroy helper.
+ BlockHasCopyDispose = true;
+
+ const llvm::Type *Ty = PtrStructTy;
+ Ty = llvm::PointerType::get(Ty, 0);
+ V = Builder.CreateBitCast(V, Ty);
+ V = Builder.CreateLoad(V);
+ V = Builder.CreateStructGEP(V, 1, "forwarding");
+ V = Builder.CreateLoad(V);
+ V = Builder.CreateBitCast(V, PtrStructTy);
+ V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
+ VD->getNameAsString());
+ if (VD->getType()->isReferenceType())
+ V = Builder.CreateLoad(V);
+ } else {
+ const llvm::Type *Ty = CGM.getTypes().ConvertType(VD->getType());
+ Ty = llvm::PointerType::get(Ty, 0);
+ V = Builder.CreateBitCast(V, Ty);
+ if (VD->getType()->isReferenceType())
+ V = Builder.CreateLoad(V, "ref.tmp");
+ }
+ return V;
+}
+
+llvm::Constant *
+BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
+ // Generate the block descriptor.
+ const llvm::Type *UnsignedLongTy = Types.ConvertType(Context.UnsignedLongTy);
+ const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
+ getTypes().ConvertType(getContext().IntTy));
+
+ llvm::Constant *DescriptorFields[4];
+
+ // Reserved
+ DescriptorFields[0] = llvm::Constant::getNullValue(UnsignedLongTy);
+
+ // Block literal size. For global blocks we just use the size of the generic
+ // block literal struct.
+ CharUnits BlockLiteralSize =
+ CGM.GetTargetTypeStoreSize(getGenericBlockLiteralType());
+ DescriptorFields[1] =
+ llvm::ConstantInt::get(UnsignedLongTy,BlockLiteralSize.getQuantity());
+
+ // signature. non-optional ObjC-style method descriptor @encode sequence
+ std::string BlockTypeEncoding;
+ CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);
+
+ DescriptorFields[2] = llvm::ConstantExpr::getBitCast(
+ CGM.GetAddrOfConstantCString(BlockTypeEncoding), PtrToInt8Ty);
+
+ // layout
+ DescriptorFields[3] =
+ llvm::ConstantInt::get(UnsignedLongTy,0);
+
+ // build the structure from the 4 elements
+ llvm::Constant *DescriptorStruct =
+ llvm::ConstantStruct::get(VMContext, &DescriptorFields[0], 4, false);
+
+ llvm::GlobalVariable *Descriptor =
+ new llvm::GlobalVariable(getModule(), DescriptorStruct->getType(), true,
+ llvm::GlobalVariable::InternalLinkage,
+ DescriptorStruct, "__block_descriptor_global");
+
+ int FieldCount = 5;
+ // Generate the constants for the block literal.
+
+ std::vector<llvm::Constant*> LiteralFields(FieldCount);
+
+ CGBlockInfo Info(n);
+ llvm::Constant *BlockVarLayout;
+ llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
+ llvm::Function *Fn
+ = CodeGenFunction(CGM).GenerateBlockFunction(GlobalDecl(), BE,
+ Info, 0, BlockVarLayout,
+ LocalDeclMap);
+ assert(Info.BlockSize == BlockLiteralSize
+ && "no imports allowed for global block");
+
+ // isa
+ LiteralFields[0] = CGM.getNSConcreteGlobalBlock();
+
+ // __flags
+ unsigned flags = computeBlockFlag(CGM, BE,
+ (BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE));
+ LiteralFields[1] =
+ llvm::ConstantInt::get(IntTy, flags);
+
+ // Reserved
+ LiteralFields[2] = llvm::Constant::getNullValue(IntTy);
+
+ // Function
+ LiteralFields[3] = Fn;
+
+ // Descriptor
+ LiteralFields[4] = Descriptor;
+
+ llvm::Constant *BlockLiteralStruct =
+ llvm::ConstantStruct::get(VMContext, LiteralFields, false);
+
+ llvm::GlobalVariable *BlockLiteral =
+ new llvm::GlobalVariable(getModule(), BlockLiteralStruct->getType(), true,
+ llvm::GlobalVariable::InternalLinkage,
+ BlockLiteralStruct, "__block_literal_global");
+
+ return BlockLiteral;
+}
+
+llvm::Value *CodeGenFunction::LoadBlockStruct() {
+ llvm::Value *V = Builder.CreateLoad(LocalDeclMap[getBlockStructDecl()],
+ "self");
+ // For now, we codegen based upon byte offsets.
+ return Builder.CreateBitCast(V, PtrToInt8Ty);
+}
+
+llvm::Function *
+CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, const BlockExpr *BExpr,
+ CGBlockInfo &Info,
+ const Decl *OuterFuncDecl,
+ llvm::Constant *& BlockVarLayout,
+ llvm::DenseMap<const Decl*, llvm::Value*> ldm) {
+
+ // Check if we should generate debug info for this block.
+ if (CGM.getDebugInfo())
+ DebugInfo = CGM.getDebugInfo();
+
+ // Arrange for local static and local extern declarations to appear
+ // to be local to this function as well, as they are directly referenced
+ // in a block.
+ for (llvm::DenseMap<const Decl *, llvm::Value*>::iterator i = ldm.begin();
+ i != ldm.end();
+ ++i) {
+ const VarDecl *VD = dyn_cast<VarDecl>(i->first);
+
+ if (VD->getStorageClass() == SC_Static || VD->hasExternalStorage())
+ LocalDeclMap[VD] = i->second;
+ }
+
+ BlockOffset =
+ CGM.GetTargetTypeStoreSize(CGM.getGenericBlockLiteralType());
+ BlockAlign = getContext().getTypeAlignInChars(getContext().VoidPtrTy);
+
+ const FunctionType *BlockFunctionType = BExpr->getFunctionType();
+ QualType ResultType;
+ FunctionType::ExtInfo EInfo = getFunctionExtInfo(*BlockFunctionType);
+ bool IsVariadic;
+ if (const FunctionProtoType *FTy =
+ dyn_cast<FunctionProtoType>(BlockFunctionType)) {
+ ResultType = FTy->getResultType();
+ IsVariadic = FTy->isVariadic();
+ } else {
+ // K&R style block.
+ ResultType = BlockFunctionType->getResultType();
+ IsVariadic = false;
+ }
+
+ FunctionArgList Args;
+
+ CurFuncDecl = OuterFuncDecl;
+
+ const BlockDecl *BD = BExpr->getBlockDecl();
+
+ IdentifierInfo *II = &CGM.getContext().Idents.get(".block_descriptor");
+
+ // Build the block struct now.
+ AllocateAllBlockDeclRefs(*this, Info);
+
+  // Capture block layout info here.
+ if (CGM.getContext().getLangOptions().ObjC1)
+ BlockVarLayout = CGM.getObjCRuntime().GCBlockLayout(*this, Info.DeclRefs);
+ else
+ BlockVarLayout = llvm::Constant::getNullValue(PtrToInt8Ty);
+
+ QualType ParmTy = getContext().getBlockParmType(BlockHasCopyDispose,
+ BlockLayout);
+
+ // FIXME: This leaks
+ ImplicitParamDecl *SelfDecl =
+ ImplicitParamDecl::Create(getContext(), const_cast<BlockDecl*>(BD),
+ SourceLocation(), II,
+ ParmTy);
+
+ Args.push_back(std::make_pair(SelfDecl, SelfDecl->getType()));
+ BlockStructDecl = SelfDecl;
+
+ for (BlockDecl::param_const_iterator i = BD->param_begin(),
+ e = BD->param_end(); i != e; ++i)
+ Args.push_back(std::make_pair(*i, (*i)->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(ResultType, Args, EInfo);
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic);
+
+ MangleBuffer Name;
+ CGM.getMangledName(GD, Name, BD);
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ Name.getString(), &CGM.getModule());
+
+ CGM.SetInternalFunctionAttributes(BD, Fn, FI);
+ StartFunction(BD, ResultType, Fn, Args,
+ BExpr->getBody()->getLocEnd());
+
+ CurFuncDecl = OuterFuncDecl;
+
+ QualType FnType(BlockFunctionType, 0);
+ bool HasPrototype = isa<FunctionProtoType>(BlockFunctionType);
+
+ IdentifierInfo *ID = &getContext().Idents.get(Name.getString());
+ CurCodeDecl = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), ID, FnType,
+ 0,
+ SC_Static,
+ SC_None,
+ false, HasPrototype);
+ if (FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FnType)) {
+ const FunctionDecl *CFD = dyn_cast<FunctionDecl>(CurCodeDecl);
+ FunctionDecl *FD = const_cast<FunctionDecl *>(CFD);
+ llvm::SmallVector<ParmVarDecl*, 16> Params;
+ for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i)
+ Params.push_back(ParmVarDecl::Create(getContext(), FD,
+ SourceLocation(), 0,
+ FT->getArgType(i), /*TInfo=*/0,
+ SC_None, SC_None, 0));
+ FD->setParams(Params.data(), Params.size());
+ }
+
+
+ // If we have a C++ 'this' reference, go ahead and force it into
+ // existence now.
+ if (Info.CXXThisRef) {
+ assert(!BlockCXXThisOffset.isZero() &&
+ "haven't yet allocated 'this' reference");
+
+ // TODO: I have a dream that one day this will be typed.
+ llvm::Value *BlockLiteral = LoadBlockStruct();
+ llvm::Value *ThisPtrRaw =
+ Builder.CreateConstInBoundsGEP1_64(BlockLiteral,
+ BlockCXXThisOffset.getQuantity(),
+ "this.ptr.raw");
+
+ const llvm::Type *Ty =
+ CGM.getTypes().ConvertType(Info.CXXThisRef->getType());
+ Ty = llvm::PointerType::get(Ty, 0);
+ llvm::Value *ThisPtr = Builder.CreateBitCast(ThisPtrRaw, Ty, "this.ptr");
+
+ CXXThisValue = Builder.CreateLoad(ThisPtr, "this");
+ }
+
+ // If we have an Objective C 'self' reference, go ahead and force it
+ // into existence now.
+ if (Info.NeedsObjCSelf) {
+ ValueDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl();
+ LocalDeclMap[Self] = GetAddrOfBlockDecl(Self, false);
+ }
+
+ // Save a spot to insert the debug information for all the BlockDeclRefDecls.
+ llvm::BasicBlock *entry = Builder.GetInsertBlock();
+ llvm::BasicBlock::iterator entry_ptr = Builder.GetInsertPoint();
+ --entry_ptr;
+
+ EmitStmt(BExpr->getBody());
+
+ // Remember where we were...
+ llvm::BasicBlock *resume = Builder.GetInsertBlock();
+
+ // Go back to the entry.
+ ++entry_ptr;
+ Builder.SetInsertPoint(entry, entry_ptr);
+
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ // Emit debug information for all the BlockDeclRefDecls.
+ // FIXME: also for 'this'
+ for (unsigned i = 0, e = BlockLayout.size(); i != e; ++i) {
+ if (const BlockDeclRefExpr *BDRE =
+ dyn_cast<BlockDeclRefExpr>(BlockLayout[i])) {
+ const ValueDecl *D = BDRE->getDecl();
+ DI->setLocation(D->getLocation());
+ DI->EmitDeclareOfBlockDeclRefVariable(BDRE,
+ LocalDeclMap[getBlockStructDecl()],
+ Builder, this);
+ }
+ }
+ }
+ // And resume where we left off.
+ if (resume == 0)
+ Builder.ClearInsertionPoint();
+ else
+ Builder.SetInsertPoint(resume);
+
+ FinishFunction(cast<CompoundStmt>(BExpr->getBody())->getRBracLoc());
+
+ // The runtime needs a minimum alignment of a void *.
+ CharUnits MinAlign = getContext().getTypeAlignInChars(getContext().VoidPtrTy);
+ BlockOffset = CharUnits::fromQuantity(
+ llvm::RoundUpToAlignment(BlockOffset.getQuantity(),
+ MinAlign.getQuantity()));
+
+ Info.BlockSize = BlockOffset;
+ Info.BlockAlign = BlockAlign;
+ Info.BlockLayout = BlockLayout;
+ Info.BlockHasCopyDispose = BlockHasCopyDispose;
+ return Fn;
+}
+
+CharUnits BlockFunction::getBlockOffset(CharUnits Size, CharUnits Align) {
+ assert((Align.isPositive()) && "alignment must be 1 byte or more");
+
+ CharUnits OldOffset = BlockOffset;
+
+ // Ensure proper alignment, even if it means we have to have a gap
+ BlockOffset = CharUnits::fromQuantity(
+ llvm::RoundUpToAlignment(BlockOffset.getQuantity(), Align.getQuantity()));
+ BlockAlign = std::max(Align, BlockAlign);
+
+ CharUnits Pad = BlockOffset - OldOffset;
+ if (Pad.isPositive()) {
+ QualType PadTy = getContext().getConstantArrayType(getContext().CharTy,
+ llvm::APInt(32,
+ Pad.getQuantity()),
+ ArrayType::Normal, 0);
+ ValueDecl *PadDecl = VarDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(),
+ 0, QualType(PadTy), 0,
+ SC_None, SC_None);
+ Expr *E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(),
+ SourceLocation());
+ BlockLayout.push_back(E);
+ }
+
+ BlockOffset += Size;
+ return BlockOffset - Size;
+}
+
+llvm::Constant *BlockFunction::
+GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
+ std::vector<HelperInfo> *NoteForHelperp) {
+ QualType R = getContext().VoidTy;
+
+ FunctionArgList Args;
+ // FIXME: This leaks
+ ImplicitParamDecl *Dst =
+ ImplicitParamDecl::Create(getContext(), 0,
+ SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+ Args.push_back(std::make_pair(Dst, Dst->getType()));
+ ImplicitParamDecl *Src =
+ ImplicitParamDecl::Create(getContext(), 0,
+ SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+ Args.push_back(std::make_pair(Src, Src->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
+
+ // FIXME: We'd like to put these into a mergable by content, with
+ // internal linkage.
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__copy_helper_block_", &CGM.getModule());
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__copy_helper_block_");
+
+ FunctionDecl *FD = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), II, R, 0,
+ SC_Static,
+ SC_None,
+ false,
+ true);
+ CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
+
+ llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
+ llvm::Type *PtrPtrT;
+
+ if (NoteForHelperp) {
+ std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;
+
+ PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
+ SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
+ SrcObj = Builder.CreateLoad(SrcObj);
+
+ llvm::Value *DstObj = CGF.GetAddrOfLocalVar(Dst);
+ llvm::Type *PtrPtrT;
+ PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
+ DstObj = Builder.CreateBitCast(DstObj, PtrPtrT);
+ DstObj = Builder.CreateLoad(DstObj);
+
+ for (unsigned i=0; i < NoteForHelper.size(); ++i) {
+ int flag = NoteForHelper[i].flag;
+ int index = NoteForHelper[i].index;
+
+ if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
+ || NoteForHelper[i].RequiresCopying) {
+ llvm::Value *Srcv = SrcObj;
+ Srcv = Builder.CreateStructGEP(Srcv, index);
+ Srcv = Builder.CreateBitCast(Srcv,
+ llvm::PointerType::get(PtrToInt8Ty, 0));
+ Srcv = Builder.CreateLoad(Srcv);
+
+ llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index);
+ Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty);
+
+ llvm::Value *N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
+ llvm::Value *F = CGM.getBlockObjectAssign();
+ Builder.CreateCall3(F, Dstv, Srcv, N);
+ }
+ }
+ }
+
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+}
+
+llvm::Constant *BlockFunction::
+GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
+ const llvm::StructType* T,
+ std::vector<HelperInfo> *NoteForHelperp) {
+ QualType R = getContext().VoidTy;
+
+ FunctionArgList Args;
+ // FIXME: This leaks
+ ImplicitParamDecl *Src =
+ ImplicitParamDecl::Create(getContext(), 0,
+ SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+
+ Args.push_back(std::make_pair(Src, Src->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
+
+ // FIXME: We'd like to put these into a mergable by content, with
+ // internal linkage.
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__destroy_helper_block_", &CGM.getModule());
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__destroy_helper_block_");
+
+ FunctionDecl *FD = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), II, R, 0,
+ SC_Static,
+ SC_None,
+ false, true);
+ CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
+
+ if (NoteForHelperp) {
+ std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;
+
+ llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
+ llvm::Type *PtrPtrT;
+ PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
+ SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
+ SrcObj = Builder.CreateLoad(SrcObj);
+
+ for (unsigned i=0; i < NoteForHelper.size(); ++i) {
+ int flag = NoteForHelper[i].flag;
+ int index = NoteForHelper[i].index;
+
+ if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
+ || NoteForHelper[i].RequiresCopying) {
+ llvm::Value *Srcv = SrcObj;
+ Srcv = Builder.CreateStructGEP(Srcv, index);
+ Srcv = Builder.CreateBitCast(Srcv,
+ llvm::PointerType::get(PtrToInt8Ty, 0));
+ Srcv = Builder.CreateLoad(Srcv);
+
+ BuildBlockRelease(Srcv, flag);
+ }
+ }
+ }
+
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+}
+
+llvm::Constant *BlockFunction::BuildCopyHelper(const llvm::StructType *T,
+ std::vector<HelperInfo> *NoteForHelper) {
+ return CodeGenFunction(CGM).GenerateCopyHelperFunction(BlockHasCopyDispose,
+ T, NoteForHelper);
+}
+
+llvm::Constant *BlockFunction::BuildDestroyHelper(const llvm::StructType *T,
+ std::vector<HelperInfo> *NoteForHelperp) {
+ return CodeGenFunction(CGM).GenerateDestroyHelperFunction(BlockHasCopyDispose,
+ T, NoteForHelperp);
+}
+
+llvm::Constant *BlockFunction::
+GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
+ QualType R = getContext().VoidTy;
+
+ FunctionArgList Args;
+ // FIXME: This leaks
+ ImplicitParamDecl *Dst =
+ ImplicitParamDecl::Create(getContext(), 0,
+ SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+ Args.push_back(std::make_pair(Dst, Dst->getType()));
+
+ // FIXME: This leaks
+ ImplicitParamDecl *Src =
+ ImplicitParamDecl::Create(getContext(), 0,
+ SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+ Args.push_back(std::make_pair(Src, Src->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+
+ // FIXME: We'd like to put these into a mergable by content, with
+ // internal linkage.
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__Block_byref_id_object_copy_", &CGM.getModule());
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__Block_byref_id_object_copy_");
+
+ FunctionDecl *FD = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), II, R, 0,
+ SC_Static,
+ SC_None,
+ false, true);
+ CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
+
+ // dst->x
+ llvm::Value *V = CGF.GetAddrOfLocalVar(Dst);
+ V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
+ V = Builder.CreateLoad(V);
+ V = Builder.CreateStructGEP(V, 6, "x");
+ llvm::Value *DstObj = Builder.CreateBitCast(V, PtrToInt8Ty);
+
+ // src->x
+ V = CGF.GetAddrOfLocalVar(Src);
+ V = Builder.CreateLoad(V);
+ V = Builder.CreateBitCast(V, T);
+ V = Builder.CreateStructGEP(V, 6, "x");
+ V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
+ llvm::Value *SrcObj = Builder.CreateLoad(V);
+
+ flag |= BLOCK_BYREF_CALLER;
+
+ llvm::Value *N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
+ llvm::Value *F = CGM.getBlockObjectAssign();
+ Builder.CreateCall3(F, DstObj, SrcObj, N);
+
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+}
+
+llvm::Constant *
+BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
+ int flag) {
+ QualType R = getContext().VoidTy;
+
+ FunctionArgList Args;
+ // FIXME: This leaks
+ ImplicitParamDecl *Src =
+ ImplicitParamDecl::Create(getContext(), 0,
+ SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+
+ Args.push_back(std::make_pair(Src, Src->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+
+ // FIXME: We'd like to put these into a section mergeable by content, with
+ // internal linkage.
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__Block_byref_id_object_dispose_",
+ &CGM.getModule());
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__Block_byref_id_object_dispose_");
+
+ FunctionDecl *FD = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), II, R, 0,
+ SC_Static,
+ SC_None,
+ false, true);
+ CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
+
+ llvm::Value *V = CGF.GetAddrOfLocalVar(Src);
+ V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
+ V = Builder.CreateLoad(V);
+ V = Builder.CreateStructGEP(V, 6, "x");
+ V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
+ V = Builder.CreateLoad(V);
+
+ flag |= BLOCK_BYREF_CALLER;
+ BuildBlockRelease(V, flag);
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+}
+
+llvm::Constant *BlockFunction::BuildbyrefCopyHelper(const llvm::Type *T,
+ int Flag, unsigned Align) {
+ // All alignments below that of pointer alignment collapse down to just
+ // pointer alignment, as we always have at least that much alignment to begin
+ // with.
+ Align /= unsigned(CGF.Target.getPointerAlign(0)/8);
+
+ // As an optimization, we only generate a single function of each kind we
+ // might need. We need a different one for each alignment and for each
+ // setting of flags. We mix Align and flag to get the kind.
+ uint64_t Kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + Flag;
+ llvm::Constant *&Entry = CGM.AssignCache[Kind];
+ if (Entry)
+ return Entry;
+ return Entry = CodeGenFunction(CGM).GeneratebyrefCopyHelperFunction(T, Flag);
+}
+
+llvm::Constant *BlockFunction::BuildbyrefDestroyHelper(const llvm::Type *T,
+ int Flag,
+ unsigned Align) {
+ // All alignments below that of pointer alignment collapse down to just
+ // pointer alignment, as we always have at least that much alignment to begin
+ // with.
+ Align /= unsigned(CGF.Target.getPointerAlign(0)/8);
+
+ // As an optimization, we only generate a single function of each kind we
+ // might need. We need a different one for each alignment and for each
+ // setting of flags. We mix Align and flag to get the kind.
+ uint64_t Kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + Flag;
+ llvm::Constant *&Entry = CGM.DestroyCache[Kind];
+ if (Entry)
+ return Entry;
+ return Entry=CodeGenFunction(CGM).GeneratebyrefDestroyHelperFunction(T, Flag);
+}
+
+void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) {
+ llvm::Value *F = CGM.getBlockObjectDispose();
+ llvm::Value *N;
+ V = Builder.CreateBitCast(V, PtrToInt8Ty);
+ N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
+ Builder.CreateCall2(F, V, N);
+}
+
+ASTContext &BlockFunction::getContext() const { return CGM.getContext(); }
+
+BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf,
+ CGBuilderTy &B)
+ : CGM(cgm), VMContext(cgm.getLLVMContext()), CGF(cgf), Builder(B) {
+ PtrToInt8Ty = llvm::PointerType::getUnqual(
+ llvm::Type::getInt8Ty(VMContext));
+
+ BlockHasCopyDispose = false;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
new file mode 100644
index 0000000..743e3c8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
@@ -0,0 +1,205 @@
+//===-- CGBlocks.h - state for LLVM CodeGen for blocks ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal state used for LLVM translation of block literals.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGBLOCKS_H
+#define CLANG_CODEGEN_CGBLOCKS_H
+
+#include "CodeGenTypes.h"
+#include "clang/AST/Type.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/SmallVector.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+
+#include <vector>
+#include <map>
+
+#include "CGBuilder.h"
+#include "CGCall.h"
+#include "CGValue.h"
+
+namespace llvm {
+ class Module;
+ class Constant;
+ class Function;
+ class GlobalValue;
+ class TargetData;
+ class FunctionType;
+ class PointerType;
+ class Value;
+ class LLVMContext;
+}
+
+namespace clang {
+
+namespace CodeGen {
+class CodeGenModule;
+
+class BlockBase {
+public:
+ enum {
+ BLOCK_HAS_COPY_DISPOSE = (1 << 25),
+ BLOCK_HAS_CXX_OBJ = (1 << 26),
+ BLOCK_IS_GLOBAL = (1 << 28),
+ BLOCK_USE_STRET = (1 << 29),
+ BLOCK_HAS_SIGNATURE = (1 << 30)
+ };
+};
+
+
+class BlockModule : public BlockBase {
+ ASTContext &Context;
+ llvm::Module &TheModule;
+ const llvm::TargetData &TheTargetData;
+ CodeGenTypes &Types;
+ CodeGenModule &CGM;
+ llvm::LLVMContext &VMContext;
+
+ ASTContext &getContext() const { return Context; }
+ llvm::Module &getModule() const { return TheModule; }
+ CodeGenTypes &getTypes() { return Types; }
+ const llvm::TargetData &getTargetData() const { return TheTargetData; }
+public:
+ int getGlobalUniqueCount() { return ++Block.GlobalUniqueCount; }
+ const llvm::Type *getBlockDescriptorType();
+
+ const llvm::Type *getGenericBlockLiteralType();
+
+ llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, const char *);
+
+ const llvm::Type *BlockDescriptorType;
+ const llvm::Type *GenericBlockLiteralType;
+
+ struct {
+ int GlobalUniqueCount;
+ } Block;
+
+ const llvm::PointerType *PtrToInt8Ty;
+
+ std::map<uint64_t, llvm::Constant *> AssignCache;
+ std::map<uint64_t, llvm::Constant *> DestroyCache;
+
+ BlockModule(ASTContext &C, llvm::Module &M, const llvm::TargetData &TD,
+ CodeGenTypes &T, CodeGenModule &CodeGen)
+ : Context(C), TheModule(M), TheTargetData(TD), Types(T),
+ CGM(CodeGen), VMContext(M.getContext()),
+ BlockDescriptorType(0), GenericBlockLiteralType(0) {
+ Block.GlobalUniqueCount = 0;
+ PtrToInt8Ty = llvm::Type::getInt8PtrTy(M.getContext());
+ }
+
+ bool BlockRequiresCopying(QualType Ty)
+ { return getContext().BlockRequiresCopying(Ty); }
+};
+
+class BlockFunction : public BlockBase {
+ CodeGenModule &CGM;
+ ASTContext &getContext() const;
+
+protected:
+ llvm::LLVMContext &VMContext;
+
+public:
+ CodeGenFunction &CGF;
+
+ const llvm::PointerType *PtrToInt8Ty;
+ struct HelperInfo {
+ int index;
+ int flag;
+ bool RequiresCopying;
+ };
+
+ enum {
+ BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)),
+ block, ... */
+ BLOCK_FIELD_IS_BLOCK = 7, /* a block variable */
+ BLOCK_FIELD_IS_BYREF = 8, /* the on stack structure holding the __block
+ variable */
+ BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy
+ helpers */
+ BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
+ support routines */
+ BLOCK_BYREF_CURRENT_MAX = 256
+ };
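+
+ // For illustration only (not an exhaustive ABI description): these flag
+ // values are what get passed to the _Block_object_assign /
+ // _Block_object_dispose runtime entry points. A block copying a __weak
+ // __block variable would plausibly use
+ // BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK (8 | 16), and the byref
+ // copy/dispose helpers additionally OR in BLOCK_BYREF_CALLER (128) before
+ // calling into the runtime, as GeneratebyrefCopyHelperFunction does above.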
+
+ CGBuilderTy &Builder;
+
+ BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf, CGBuilderTy &B);
+
+ /// BlockOffset - The offset in bytes for the next allocation of an
+ /// imported block variable.
+ CharUnits BlockOffset;
+ /// BlockAlign - Maximal alignment needed for the Block expressed in
+ /// characters.
+ CharUnits BlockAlign;
+
+ /// getBlockOffset - Allocate a location within the block's storage
+ /// for a value with the given size and alignment requirements.
+ CharUnits getBlockOffset(CharUnits Size, CharUnits Align);
+
+ /// BlockHasCopyDispose - True iff the block uses copy/dispose.
+ bool BlockHasCopyDispose;
+
+ /// BlockLayout - The layout of the block's storage, represented as
+ /// a sequence of expressions which require such storage. The
+ /// expressions can be:
+ /// - a BlockDeclRefExpr, indicating that the given declaration
+ /// from an enclosing scope is needed by the block;
+ /// - a DeclRefExpr, which always wraps an anonymous VarDecl with
+ /// array type, used to insert padding into the block; or
+ /// - a CXXThisExpr, indicating that the C++ 'this' value should
+ /// propagate from the parent to the block.
+ /// This is a really silly representation.
+ llvm::SmallVector<const Expr *, 8> BlockLayout;
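+ // For example (purely illustrative): for a block like
+ // ^{ return x + this->n; } written inside a C++ member function,
+ // BlockLayout might hold [ CXXThisExpr, BlockDeclRefExpr(x) ], with padding
+ // DeclRefExprs inserted between entries as needed for alignment.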
+
+ /// BlockDecls - Offsets for all Decls in BlockDeclRefExprs.
+ llvm::DenseMap<const Decl*, CharUnits> BlockDecls;
+
+ /// BlockCXXThisOffset - The offset of the C++ 'this' value within
+ /// the block structure.
+ CharUnits BlockCXXThisOffset;
+
+ ImplicitParamDecl *BlockStructDecl;
+ ImplicitParamDecl *getBlockStructDecl() { return BlockStructDecl; }
+
+ llvm::Constant *GenerateCopyHelperFunction(bool, const llvm::StructType *,
+ std::vector<HelperInfo> *);
+ llvm::Constant *GenerateDestroyHelperFunction(bool, const llvm::StructType *,
+ std::vector<HelperInfo> *);
+
+ llvm::Constant *BuildCopyHelper(const llvm::StructType *,
+ std::vector<HelperInfo> *);
+ llvm::Constant *BuildDestroyHelper(const llvm::StructType *,
+ std::vector<HelperInfo> *);
+
+ llvm::Constant *GeneratebyrefCopyHelperFunction(const llvm::Type *, int flag);
+ llvm::Constant *GeneratebyrefDestroyHelperFunction(const llvm::Type *T, int);
+
+ llvm::Constant *BuildbyrefCopyHelper(const llvm::Type *T, int flag,
+ unsigned Align);
+ llvm::Constant *BuildbyrefDestroyHelper(const llvm::Type *T, int flag,
+ unsigned Align);
+
+ void BuildBlockRelease(llvm::Value *DeclPtr, int flag = BLOCK_FIELD_IS_BYREF);
+
+ bool BlockRequiresCopying(QualType Ty)
+ { return getContext().BlockRequiresCopying(Ty); }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h b/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h
new file mode 100644
index 0000000..8120217
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h
@@ -0,0 +1,28 @@
+//===-- CGBuilder.h - Choose IRBuilder implementation ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGBUILDER_H
+#define CLANG_CODEGEN_CGBUILDER_H
+
+#include "llvm/Support/IRBuilder.h"
+
+namespace clang {
+namespace CodeGen {
+
+// Don't preserve names on values in an optimized build.
+#ifdef NDEBUG
+typedef llvm::IRBuilder<false> CGBuilderTy;
+#else
+typedef llvm::IRBuilder<> CGBuilderTy;
+#endif
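+
+// For example: with preserveNames == false, a call such as
+// Builder.CreateAdd(L, R, "sum") produces an unnamed temporary (e.g. %1)
+// rather than %sum, which avoids string handling overhead in release builds.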
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
new file mode 100644
index 0000000..986f621
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
@@ -0,0 +1,2182 @@
+//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Builtin calls as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "TargetInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGObjCRuntime.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+using namespace llvm;
+
+static void EmitMemoryBarrier(CodeGenFunction &CGF,
+ bool LoadLoad, bool LoadStore,
+ bool StoreLoad, bool StoreStore,
+ bool Device) {
+ Value *True = llvm::ConstantInt::getTrue(CGF.getLLVMContext());
+ Value *False = llvm::ConstantInt::getFalse(CGF.getLLVMContext());
+ Value *C[5] = { LoadLoad ? True : False,
+ LoadStore ? True : False,
+ StoreLoad ? True : False,
+ StoreStore ? True : False,
+ Device ? True : False };
+ CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::memory_barrier),
+ C, C + 5);
+}
+
+static Value *EmitCastToInt(CodeGenFunction &CGF,
+ const llvm::Type *ToType, Value *Val) {
+ if (Val->getType()->isPointerTy()) {
+ return CGF.Builder.CreatePtrToInt(Val, ToType);
+ }
+ assert(Val->getType()->isIntegerTy() &&
+ "Used a non-integer and non-pointer type with atomic builtin");
+ assert(Val->getType()->getScalarSizeInBits() <=
+ ToType->getScalarSizeInBits() && "Integer type too small");
+ return CGF.Builder.CreateSExtOrBitCast(Val, ToType);
+}
+
+static Value *EmitCastFromInt(CodeGenFunction &CGF, QualType ToQualType,
+ Value *Val) {
+ const llvm::Type *ToType = CGF.ConvertType(ToQualType);
+ if (ToType->isPointerTy()) {
+ return CGF.Builder.CreateIntToPtr(Val, ToType);
+ }
+ assert(Val->getType()->isIntegerTy() &&
+ "Used a non-integer and non-pointer type with atomic builtin");
+ assert(Val->getType()->getScalarSizeInBits() >=
+ ToType->getScalarSizeInBits() && "Integer type too small");
+ return CGF.Builder.CreateTruncOrBitCast(Val, ToType);
+}
+
+// The atomic builtins are also full memory barriers. This is a utility for
+// wrapping a call to the builtins with memory barriers.
+static Value *EmitCallWithBarrier(CodeGenFunction &CGF, Value *Fn,
+ Value **ArgBegin, Value **ArgEnd) {
+ // FIXME: We need a target hook for whether this applies to device memory or
+ // not.
+ bool Device = true;
+
+ // Create barriers both before and after the call.
+ EmitMemoryBarrier(CGF, true, true, true, true, Device);
+ Value *Result = CGF.Builder.CreateCall(Fn, ArgBegin, ArgEnd);
+ EmitMemoryBarrier(CGF, true, true, true, true, Device);
+ return Result;
+}
+
+/// Utility to insert an atomic instruction based on Intrinsic::ID
+/// and the expression node.
+static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
+ Intrinsic::ID Id, const CallExpr *E) {
+ const llvm::Type *ValueType =
+ llvm::IntegerType::get(CGF.getLLVMContext(),
+ CGF.getContext().getTypeSize(E->getType()));
+ const llvm::Type *PtrType = ValueType->getPointerTo();
+ const llvm::Type *IntrinsicTypes[2] = { ValueType, PtrType };
+ Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);
+
+ Value *Args[2] = { CGF.Builder.CreateBitCast(CGF.EmitScalarExpr(E->getArg(0)),
+ PtrType),
+ EmitCastToInt(CGF, ValueType,
+ CGF.EmitScalarExpr(E->getArg(1))) };
+ return RValue::get(EmitCastFromInt(CGF, E->getType(),
+ EmitCallWithBarrier(CGF, AtomF, Args,
+ Args + 2)));
+}
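+
+// Rough shape of the IR this produces for, e.g., __sync_fetch_and_add(p, 1)
+// on an i32 (illustrative only; the exact overloaded intrinsic names depend
+// on the types involved):
+// call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+// %old = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 1)
+// call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)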
+
+/// Utility to insert an atomic instruction based on Intrinsic::ID and
+/// the expression node, where the return value is the result of the
+/// operation.
+static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
+ Intrinsic::ID Id, const CallExpr *E,
+ Instruction::BinaryOps Op) {
+ const llvm::Type *ValueType =
+ llvm::IntegerType::get(CGF.getLLVMContext(),
+ CGF.getContext().getTypeSize(E->getType()));
+ const llvm::Type *PtrType = ValueType->getPointerTo();
+ const llvm::Type *IntrinsicTypes[2] = { ValueType, PtrType };
+ Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);
+
+ Value *Args[2] = { CGF.Builder.CreateBitCast(CGF.EmitScalarExpr(E->getArg(0)),
+ PtrType),
+ EmitCastToInt(CGF, ValueType,
+ CGF.EmitScalarExpr(E->getArg(1))) };
+ Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
+ return RValue::get(EmitCastFromInt(CGF, E->getType(),
+ CGF.Builder.CreateBinOp(Op, Result,
+ Args[1])));
+}
+
+/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
+/// which must be a scalar floating point type.
+static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
+ const BuiltinType *ValTyP = ValTy->getAs<BuiltinType>();
+ assert(ValTyP && "isn't scalar fp type!");
+
+ StringRef FnName;
+ switch (ValTyP->getKind()) {
+ default: assert(0 && "Isn't a scalar fp type!");
+ case BuiltinType::Float: FnName = "fabsf"; break;
+ case BuiltinType::Double: FnName = "fabs"; break;
+ case BuiltinType::LongDouble: FnName = "fabsl"; break;
+ }
+
+ // The prototype is something that takes and returns whatever V's type is.
+ std::vector<const llvm::Type*> Args;
+ Args.push_back(V->getType());
+ llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), Args, false);
+ llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);
+
+ return CGF.Builder.CreateCall(Fn, V, "abs");
+}
+
+RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
+ unsigned BuiltinID, const CallExpr *E) {
+ // See if we can constant fold this builtin. If so, don't emit it at all.
+ Expr::EvalResult Result;
+ if (E->Evaluate(Result, CGM.getContext())) {
+ if (Result.Val.isInt())
+ return RValue::get(llvm::ConstantInt::get(VMContext,
+ Result.Val.getInt()));
+ else if (Result.Val.isFloat())
+ return RValue::get(ConstantFP::get(VMContext, Result.Val.getFloat()));
+ }
+
+ switch (BuiltinID) {
+ default: break; // Handle intrinsics and libm functions below.
+ case Builtin::BI__builtin___CFStringMakeConstantString:
+ case Builtin::BI__builtin___NSStringMakeConstantString:
+ return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
+ case Builtin::BI__builtin_stdarg_start:
+ case Builtin::BI__builtin_va_start:
+ case Builtin::BI__builtin_va_end: {
+ Value *ArgValue = EmitVAListRef(E->getArg(0));
+ const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
+ if (ArgValue->getType() != DestType)
+ ArgValue = Builder.CreateBitCast(ArgValue, DestType,
+ ArgValue->getName().data());
+
+ Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
+ Intrinsic::vaend : Intrinsic::vastart;
+ return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
+ }
+ case Builtin::BI__builtin_va_copy: {
+ Value *DstPtr = EmitVAListRef(E->getArg(0));
+ Value *SrcPtr = EmitVAListRef(E->getArg(1));
+
+ const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
+
+ DstPtr = Builder.CreateBitCast(DstPtr, Type);
+ SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
+ return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
+ DstPtr, SrcPtr));
+ }
+ case Builtin::BI__builtin_abs: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
+ Value *CmpResult =
+ Builder.CreateICmpSGE(ArgValue,
+ llvm::Constant::getNullValue(ArgValue->getType()),
+ "abscond");
+ Value *Result =
+ Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");
+
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_ctz:
+ case Builtin::BI__builtin_ctzl:
+ case Builtin::BI__builtin_ctzll: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_clz:
+ case Builtin::BI__builtin_clzl:
+ case Builtin::BI__builtin_clzll: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctlz, &ArgType, 1);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_ffs:
+ case Builtin::BI__builtin_ffsl:
+ case Builtin::BI__builtin_ffsll: {
+ // ffs(x) -> x ? cttz(x) + 1 : 0
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
+ llvm::ConstantInt::get(ArgType, 1), "tmp");
+ Value *Zero = llvm::Constant::getNullValue(ArgType);
+ Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
+ Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_parity:
+ case Builtin::BI__builtin_parityl:
+ case Builtin::BI__builtin_parityll: {
+ // parity(x) -> ctpop(x) & 1
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp");
+ Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1),
+ "tmp");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_popcount:
+ case Builtin::BI__builtin_popcountl:
+ case Builtin::BI__builtin_popcountll: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_expect: {
+ // FIXME: pass expect through to LLVM
+ if (E->getArg(1)->HasSideEffects(getContext()))
+ (void)EmitScalarExpr(E->getArg(1));
+ return RValue::get(EmitScalarExpr(E->getArg(0)));
+ }
+ case Builtin::BI__builtin_bswap32:
+ case Builtin::BI__builtin_bswap64: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::bswap, &ArgType, 1);
+ return RValue::get(Builder.CreateCall(F, ArgValue, "tmp"));
+ }
+ case Builtin::BI__builtin_object_size: {
+ // We pass this builtin on to the optimizer so that it can
+ // figure out the object size in more complex cases.
+ const llvm::Type *ResType[] = {
+ ConvertType(E->getType())
+ };
+
+ // LLVM only supports 0 and 2, so make sure that we pass along that
+ // distinction as a boolean.
+ Value *Ty = EmitScalarExpr(E->getArg(1));
+ ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
+ assert(CI);
+ uint64_t val = CI->getZExtValue();
+ CI = ConstantInt::get(llvm::Type::getInt1Ty(VMContext), (val & 0x2) >> 1);
+
+ Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType, 1);
+ return RValue::get(Builder.CreateCall2(F,
+ EmitScalarExpr(E->getArg(0)),
+ CI));
+ }
+ case Builtin::BI__builtin_prefetch: {
+ Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
+ // FIXME: Technically these constants should be of type 'int', yes?
+ RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
+ llvm::ConstantInt::get(Int32Ty, 0);
+ Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
+ llvm::ConstantInt::get(Int32Ty, 3);
+ Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0);
+ return RValue::get(Builder.CreateCall3(F, Address, RW, Locality));
+ }
+ case Builtin::BI__builtin_trap: {
+ Value *F = CGM.getIntrinsic(Intrinsic::trap, 0, 0);
+ return RValue::get(Builder.CreateCall(F));
+ }
+ case Builtin::BI__builtin_unreachable: {
+ if (CatchUndefined && HaveInsertPoint())
+ EmitBranch(getTrapBB());
+ Value *V = Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+ return RValue::get(V);
+ }
+
+ case Builtin::BI__builtin_powi:
+ case Builtin::BI__builtin_powif:
+ case Builtin::BI__builtin_powil: {
+ Value *Base = EmitScalarExpr(E->getArg(0));
+ Value *Exponent = EmitScalarExpr(E->getArg(1));
+ const llvm::Type *ArgType = Base->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::powi, &ArgType, 1);
+ return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
+ }
+
+ case Builtin::BI__builtin_isgreater:
+ case Builtin::BI__builtin_isgreaterequal:
+ case Builtin::BI__builtin_isless:
+ case Builtin::BI__builtin_islessequal:
+ case Builtin::BI__builtin_islessgreater:
+ case Builtin::BI__builtin_isunordered: {
+ // Ordered comparisons: we know the arguments to these are matching scalar
+ // floating point values.
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+
+ switch (BuiltinID) {
+ default: assert(0 && "Unknown ordered comparison");
+ case Builtin::BI__builtin_isgreater:
+ LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_isgreaterequal:
+ LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_isless:
+ LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_islessequal:
+ LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_islessgreater:
+ LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_isunordered:
+ LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
+ break;
+ }
+ // ZExt bool to int type.
+ return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()),
+ "tmp"));
+ }
+ case Builtin::BI__builtin_isnan: {
+ Value *V = EmitScalarExpr(E->getArg(0));
+ V = Builder.CreateFCmpUNO(V, V, "cmp");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
+ }
+
+ case Builtin::BI__builtin_isinf: {
+ // isinf(x) --> fabs(x) == infinity
+ Value *V = EmitScalarExpr(E->getArg(0));
+ V = EmitFAbs(*this, V, E->getArg(0)->getType());
+
+ V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
+ }
+
+ // TODO: BI__builtin_isinf_sign
+ // isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0
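+ // (One possible lowering, sketched only: test fabs(x) == +infinity as the
+ // isinf case above does, recover the sign by bitcasting x to an integer and
+ // checking the sign bit as __builtin_signbit does below, then select among
+ // -1, 1 and 0.)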
+
+ case Builtin::BI__builtin_isnormal: {
+ // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
+ Value *V = EmitScalarExpr(E->getArg(0));
+ Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
+
+ Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
+ Value *IsLessThanInf =
+ Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
+ APFloat Smallest = APFloat::getSmallestNormalized(
+ getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
+ Value *IsNormal =
+ Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
+ "isnormal");
+ V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
+ V = Builder.CreateAnd(V, IsNormal, "and");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_isfinite: {
+ // isfinite(x) --> x == x && fabs(x) != infinity
+ Value *V = EmitScalarExpr(E->getArg(0));
+ Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
+
+ Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
+ Value *IsNotInf =
+ Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
+
+ V = Builder.CreateAnd(Eq, IsNotInf, "and");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_fpclassify: {
+ Value *V = EmitScalarExpr(E->getArg(5));
+ const llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
+
+ // Create Result
+ BasicBlock *Begin = Builder.GetInsertBlock();
+ BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
+ Builder.SetInsertPoint(End);
+ PHINode *Result =
+ Builder.CreatePHI(ConvertType(E->getArg(0)->getType()),
+ "fpclassify_result");
+
+ // if (V==0) return FP_ZERO
+ Builder.SetInsertPoint(Begin);
+ Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
+ "iszero");
+ Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
+ BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
+ Builder.CreateCondBr(IsZero, End, NotZero);
+ Result->addIncoming(ZeroLiteral, Begin);
+
+ // if (V != V) return FP_NAN
+ Builder.SetInsertPoint(NotZero);
+ Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
+ Value *NanLiteral = EmitScalarExpr(E->getArg(0));
+ BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
+ Builder.CreateCondBr(IsNan, End, NotNan);
+ Result->addIncoming(NanLiteral, NotZero);
+
+ // if (fabs(V) == infinity) return FP_INFINITY
+ Builder.SetInsertPoint(NotNan);
+ Value *VAbs = EmitFAbs(*this, V, E->getArg(5)->getType());
+ Value *IsInf =
+ Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
+ "isinf");
+ Value *InfLiteral = EmitScalarExpr(E->getArg(1));
+ BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
+ Builder.CreateCondBr(IsInf, End, NotInf);
+ Result->addIncoming(InfLiteral, NotNan);
+
+ // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
+ Builder.SetInsertPoint(NotInf);
+ APFloat Smallest = APFloat::getSmallestNormalized(
+ getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
+ Value *IsNormal =
+ Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
+ "isnormal");
+ Value *NormalResult =
+ Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
+ EmitScalarExpr(E->getArg(3)));
+ Builder.CreateBr(End);
+ Result->addIncoming(NormalResult, NotInf);
+
+ // return Result
+ Builder.SetInsertPoint(End);
+ return RValue::get(Result);
+ }
+
+ case Builtin::BIalloca:
+ case Builtin::BI__builtin_alloca: {
+ Value *Size = EmitScalarExpr(E->getArg(0));
+ return RValue::get(Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), Size, "tmp"));
+ }
+ case Builtin::BIbzero:
+ case Builtin::BI__builtin_bzero: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *SizeVal = EmitScalarExpr(E->getArg(1));
+ Builder.CreateCall5(CGM.getMemSetFn(Address->getType(), SizeVal->getType()),
+ Address,
+ llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0),
+ SizeVal,
+ llvm::ConstantInt::get(Int32Ty, 1),
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
+ return RValue::get(Address);
+ }
+ case Builtin::BImemcpy:
+ case Builtin::BI__builtin_memcpy: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *SrcAddr = EmitScalarExpr(E->getArg(1));
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ Builder.CreateCall5(CGM.getMemCpyFn(Address->getType(), SrcAddr->getType(),
+ SizeVal->getType()),
+ Address, SrcAddr, SizeVal,
+ llvm::ConstantInt::get(Int32Ty, 1),
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
+ return RValue::get(Address);
+ }
+
+ case Builtin::BI__builtin_objc_memmove_collectable: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *SrcAddr = EmitScalarExpr(E->getArg(1));
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
+ Address, SrcAddr, SizeVal);
+ return RValue::get(Address);
+ }
+
+ case Builtin::BImemmove:
+ case Builtin::BI__builtin_memmove: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *SrcAddr = EmitScalarExpr(E->getArg(1));
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ Builder.CreateCall5(CGM.getMemMoveFn(Address->getType(), SrcAddr->getType(),
+ SizeVal->getType()),
+ Address, SrcAddr, SizeVal,
+ llvm::ConstantInt::get(Int32Ty, 1),
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
+ return RValue::get(Address);
+ }
+ case Builtin::BImemset:
+ case Builtin::BI__builtin_memset: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ Builder.CreateCall5(CGM.getMemSetFn(Address->getType(), SizeVal->getType()),
+ Address,
+ Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
+ llvm::Type::getInt8Ty(VMContext)),
+ SizeVal,
+ llvm::ConstantInt::get(Int32Ty, 1),
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
+ return RValue::get(Address);
+ }
+ case Builtin::BI__builtin_dwarf_cfa: {
+ // The offset in bytes from the first argument to the CFA.
+ //
+ // Why on earth is this in the frontend? Is there any reason at
+ // all that the backend can't reasonably determine this while
+ // lowering llvm.eh.dwarf.cfa()?
+ //
+ // TODO: If there's a satisfactory reason, add a target hook for
+ // this instead of hard-coding 0, which is correct for most targets.
+ int32_t Offset = 0;
+
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa, 0, 0);
+ return RValue::get(Builder.CreateCall(F,
+ llvm::ConstantInt::get(Int32Ty, Offset)));
+ }
+ case Builtin::BI__builtin_return_address: {
+ Value *Depth = EmitScalarExpr(E->getArg(0));
+ Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
+ Value *F = CGM.getIntrinsic(Intrinsic::returnaddress, 0, 0);
+ return RValue::get(Builder.CreateCall(F, Depth));
+ }
+ case Builtin::BI__builtin_frame_address: {
+ Value *Depth = EmitScalarExpr(E->getArg(0));
+ Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
+ Value *F = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
+ return RValue::get(Builder.CreateCall(F, Depth));
+ }
+ case Builtin::BI__builtin_extract_return_addr: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_frob_return_addr: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_dwarf_sp_column: {
+ const llvm::IntegerType *Ty
+ = cast<llvm::IntegerType>(ConvertType(E->getType()));
+ int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
+ if (Column == -1) {
+ CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
+ return RValue::get(llvm::UndefValue::get(Ty));
+ }
+ return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
+ }
+ case Builtin::BI__builtin_init_dwarf_reg_size_table: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
+ CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
+ return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
+ }
+ case Builtin::BI__builtin_eh_return: {
+ Value *Int = EmitScalarExpr(E->getArg(0));
+ Value *Ptr = EmitScalarExpr(E->getArg(1));
+
+ const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
+ assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
+ "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
+ Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
+ ? Intrinsic::eh_return_i32
+ : Intrinsic::eh_return_i64,
+ 0, 0);
+ Builder.CreateCall2(F, Int, Ptr);
+ Value *V = Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+ return RValue::get(V);
+ }
+ case Builtin::BI__builtin_unwind_init: {
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init, 0, 0);
+ return RValue::get(Builder.CreateCall(F));
+ }
+ case Builtin::BI__builtin_extend_pointer: {
+ // Extends a pointer to the size of an _Unwind_Word, which is
+ // uint64_t on all platforms. Generally this gets poked into a
+ // register and eventually used as an address, so if the
+ // addressing registers are wider than pointers and the platform
+ // doesn't implicitly ignore high-order bits when doing
+ // addressing, we need to make sure we zext / sext based on
+ // the platform's expectations.
+ //
+ // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
+
+ LLVMContext &C = CGM.getLLVMContext();
+
+ // Cast the pointer to intptr_t.
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ const llvm::IntegerType *IntPtrTy = CGM.getTargetData().getIntPtrType(C);
+ Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
+
+ // If that's 64 bits, we're done.
+ if (IntPtrTy->getBitWidth() == 64)
+ return RValue::get(Result);
+
+ // Otherwise, ask the codegen data what to do.
+ if (getTargetHooks().extendPointerWithSExt())
+ return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
+ else
+ return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
+ }
+ case Builtin::BI__builtin_setjmp: {
+ // Buffer is a void**.
+ Value *Buf = EmitScalarExpr(E->getArg(0));
+
+ // Store the frame pointer to the setjmp buffer.
+ Value *FrameAddr =
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
+ ConstantInt::get(Int32Ty, 0));
+ Builder.CreateStore(FrameAddr, Buf);
+
+ // Store the stack pointer to the setjmp buffer.
+ Value *StackAddr =
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
+ Value *StackSaveSlot =
+ Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
+ Builder.CreateStore(StackAddr, StackSaveSlot);
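+ // (Sketch of the assumed buffer layout: slot 0 holds the frame address
+ // stored above, slot 2 holds the stack pointer stored here, and slot 1 is
+ // left for the llvm.eh.sjlj.setjmp lowering to fill in with the address to
+ // resume at.)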
+
+ // Call LLVM's EH setjmp, which is lightweight.
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
+ Buf = Builder.CreateBitCast(Buf, llvm::Type::getInt8PtrTy(VMContext));
+ return RValue::get(Builder.CreateCall(F, Buf));
+ }
+ case Builtin::BI__builtin_longjmp: {
+ Value *Buf = EmitScalarExpr(E->getArg(0));
+ Buf = Builder.CreateBitCast(Buf, llvm::Type::getInt8PtrTy(VMContext));
+
+ // Call LLVM's EH longjmp, which is lightweight.
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
+
+ // longjmp doesn't return; mark this as unreachable
+ Value *V = Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+ return RValue::get(V);
+ }
+ case Builtin::BI__sync_fetch_and_add:
+ case Builtin::BI__sync_fetch_and_sub:
+ case Builtin::BI__sync_fetch_and_or:
+ case Builtin::BI__sync_fetch_and_and:
+ case Builtin::BI__sync_fetch_and_xor:
+ case Builtin::BI__sync_add_and_fetch:
+ case Builtin::BI__sync_sub_and_fetch:
+ case Builtin::BI__sync_and_and_fetch:
+ case Builtin::BI__sync_or_and_fetch:
+ case Builtin::BI__sync_xor_and_fetch:
+ case Builtin::BI__sync_val_compare_and_swap:
+ case Builtin::BI__sync_bool_compare_and_swap:
+ case Builtin::BI__sync_lock_test_and_set:
+ case Builtin::BI__sync_lock_release:
+ assert(0 && "Shouldn't make it through sema");
+ case Builtin::BI__sync_fetch_and_add_1:
+ case Builtin::BI__sync_fetch_and_add_2:
+ case Builtin::BI__sync_fetch_and_add_4:
+ case Builtin::BI__sync_fetch_and_add_8:
+ case Builtin::BI__sync_fetch_and_add_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_add, E);
+ case Builtin::BI__sync_fetch_and_sub_1:
+ case Builtin::BI__sync_fetch_and_sub_2:
+ case Builtin::BI__sync_fetch_and_sub_4:
+ case Builtin::BI__sync_fetch_and_sub_8:
+ case Builtin::BI__sync_fetch_and_sub_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_sub, E);
+ case Builtin::BI__sync_fetch_and_or_1:
+ case Builtin::BI__sync_fetch_and_or_2:
+ case Builtin::BI__sync_fetch_and_or_4:
+ case Builtin::BI__sync_fetch_and_or_8:
+ case Builtin::BI__sync_fetch_and_or_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E);
+ case Builtin::BI__sync_fetch_and_and_1:
+ case Builtin::BI__sync_fetch_and_and_2:
+ case Builtin::BI__sync_fetch_and_and_4:
+ case Builtin::BI__sync_fetch_and_and_8:
+ case Builtin::BI__sync_fetch_and_and_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E);
+ case Builtin::BI__sync_fetch_and_xor_1:
+ case Builtin::BI__sync_fetch_and_xor_2:
+ case Builtin::BI__sync_fetch_and_xor_4:
+ case Builtin::BI__sync_fetch_and_xor_8:
+ case Builtin::BI__sync_fetch_and_xor_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);
+
+ // Clang extensions: not overloaded yet.
+ case Builtin::BI__sync_fetch_and_min:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E);
+ case Builtin::BI__sync_fetch_and_max:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_max, E);
+ case Builtin::BI__sync_fetch_and_umin:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umin, E);
+ case Builtin::BI__sync_fetch_and_umax:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umax, E);
+
+ case Builtin::BI__sync_add_and_fetch_1:
+ case Builtin::BI__sync_add_and_fetch_2:
+ case Builtin::BI__sync_add_and_fetch_4:
+ case Builtin::BI__sync_add_and_fetch_8:
+ case Builtin::BI__sync_add_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
+ llvm::Instruction::Add);
+ case Builtin::BI__sync_sub_and_fetch_1:
+ case Builtin::BI__sync_sub_and_fetch_2:
+ case Builtin::BI__sync_sub_and_fetch_4:
+ case Builtin::BI__sync_sub_and_fetch_8:
+ case Builtin::BI__sync_sub_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_sub, E,
+ llvm::Instruction::Sub);
+ case Builtin::BI__sync_and_and_fetch_1:
+ case Builtin::BI__sync_and_and_fetch_2:
+ case Builtin::BI__sync_and_and_fetch_4:
+ case Builtin::BI__sync_and_and_fetch_8:
+ case Builtin::BI__sync_and_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_and, E,
+ llvm::Instruction::And);
+ case Builtin::BI__sync_or_and_fetch_1:
+ case Builtin::BI__sync_or_and_fetch_2:
+ case Builtin::BI__sync_or_and_fetch_4:
+ case Builtin::BI__sync_or_and_fetch_8:
+ case Builtin::BI__sync_or_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_or, E,
+ llvm::Instruction::Or);
+ case Builtin::BI__sync_xor_and_fetch_1:
+ case Builtin::BI__sync_xor_and_fetch_2:
+ case Builtin::BI__sync_xor_and_fetch_4:
+ case Builtin::BI__sync_xor_and_fetch_8:
+ case Builtin::BI__sync_xor_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_xor, E,
+ llvm::Instruction::Xor);
+
+ case Builtin::BI__sync_val_compare_and_swap_1:
+ case Builtin::BI__sync_val_compare_and_swap_2:
+ case Builtin::BI__sync_val_compare_and_swap_4:
+ case Builtin::BI__sync_val_compare_and_swap_8:
+ case Builtin::BI__sync_val_compare_and_swap_16: {
+ const llvm::Type *ValueType =
+ llvm::IntegerType::get(getLLVMContext(),
+ getContext().getTypeSize(E->getType()));
+ const llvm::Type *PtrType = ValueType->getPointerTo();
+ const llvm::Type *IntrinsicTypes[2] = { ValueType, PtrType };
+ Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
+ IntrinsicTypes, 2);
+
+ Value *Args[3] = { Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)),
+ PtrType),
+ EmitCastToInt(*this, ValueType,
+ EmitScalarExpr(E->getArg(1))),
+ EmitCastToInt(*this, ValueType,
+ EmitScalarExpr(E->getArg(2))) };
+ return RValue::get(EmitCastFromInt(*this, E->getType(),
+ EmitCallWithBarrier(*this, AtomF, Args,
+ Args + 3)));
+ }
+
+ case Builtin::BI__sync_bool_compare_and_swap_1:
+ case Builtin::BI__sync_bool_compare_and_swap_2:
+ case Builtin::BI__sync_bool_compare_and_swap_4:
+ case Builtin::BI__sync_bool_compare_and_swap_8:
+ case Builtin::BI__sync_bool_compare_and_swap_16: {
+ const llvm::Type *ValueType =
+ llvm::IntegerType::get(
+ getLLVMContext(),
+ getContext().getTypeSize(E->getArg(1)->getType()));
+ const llvm::Type *PtrType = ValueType->getPointerTo();
+ const llvm::Type *IntrinsicTypes[2] = { ValueType, PtrType };
+ Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
+ IntrinsicTypes, 2);
+
+ Value *Args[3] = { Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)),
+ PtrType),
+ EmitCastToInt(*this, ValueType,
+ EmitScalarExpr(E->getArg(1))),
+ EmitCastToInt(*this, ValueType,
+ EmitScalarExpr(E->getArg(2))) };
+ Value *OldVal = Args[1];
+ Value *PrevVal = EmitCallWithBarrier(*this, AtomF, Args, Args + 3);
+ Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
+ // zext bool to int.
+ return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__sync_lock_test_and_set_1:
+ case Builtin::BI__sync_lock_test_and_set_2:
+ case Builtin::BI__sync_lock_test_and_set_4:
+ case Builtin::BI__sync_lock_test_and_set_8:
+ case Builtin::BI__sync_lock_test_and_set_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);
+
+ case Builtin::BI__sync_lock_release_1:
+ case Builtin::BI__sync_lock_release_2:
+ case Builtin::BI__sync_lock_release_4:
+ case Builtin::BI__sync_lock_release_8:
+ case Builtin::BI__sync_lock_release_16: {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ const llvm::Type *ElTy =
+ cast<llvm::PointerType>(Ptr->getType())->getElementType();
+ llvm::StoreInst *Store =
+ Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr);
+ Store->setVolatile(true);
+ return RValue::get(0);
+ }
+
+ case Builtin::BI__sync_synchronize: {
+ // We assume, as gcc appears to, that this only applies to cached memory.
+ EmitMemoryBarrier(*this, true, true, true, true, false);
+ return RValue::get(0);
+ }
+
+ case Builtin::BI__builtin_llvm_memory_barrier: {
+ Value *C[5] = {
+ EmitScalarExpr(E->getArg(0)),
+ EmitScalarExpr(E->getArg(1)),
+ EmitScalarExpr(E->getArg(2)),
+ EmitScalarExpr(E->getArg(3)),
+ EmitScalarExpr(E->getArg(4))
+ };
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5);
+ return RValue::get(0);
+ }
+
+ // Library functions with special handling.
+ case Builtin::BIsqrt:
+ case Builtin::BIsqrtf:
+ case Builtin::BIsqrtl: {
+ // TODO: there is currently no set of optimizer flags
+ // sufficient for us to rewrite sqrt to @llvm.sqrt.
+ // -fmath-errno=0 is not good enough; we need finiteness.
+ // We could probably precondition the call with an ult
+ // against 0, but is that worth the complexity?
+ break;
+ }
+
+ case Builtin::BIpow:
+ case Builtin::BIpowf:
+ case Builtin::BIpowl: {
+ // Rewrite pow to the llvm.pow intrinsic if allowed.
+ if (!FD->hasAttr<ConstAttr>())
+ break;
+ Value *Base = EmitScalarExpr(E->getArg(0));
+ Value *Exponent = EmitScalarExpr(E->getArg(1));
+ const llvm::Type *ArgType = Base->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::pow, &ArgType, 1);
+ return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
+ }
+
+ case Builtin::BI__builtin_signbit:
+ case Builtin::BI__builtin_signbitf:
+ case Builtin::BI__builtin_signbitl: {
+ LLVMContext &C = CGM.getLLVMContext();
+
+ Value *Arg = EmitScalarExpr(E->getArg(0));
+ const llvm::Type *ArgTy = Arg->getType();
+ if (ArgTy->isPPC_FP128Ty())
+ break; // FIXME: I'm not sure what the right implementation is here.
+ int ArgWidth = ArgTy->getPrimitiveSizeInBits();
+ const llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
+ Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy);
+ Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy);
+ Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp);
+ return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
+ }
+ }
+
+ // If this is an alias for a libm function (e.g. __builtin_sin) turn it into
+ // that function.
+ if (getContext().BuiltinInfo.isLibFunction(BuiltinID) ||
+ getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
+ return EmitCall(E->getCallee()->getType(),
+ CGM.getBuiltinLibFunction(FD, BuiltinID),
+ ReturnValueSlot(),
+ E->arg_begin(), E->arg_end());
+
+ // See if we have a target specific intrinsic.
+ const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
+ Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
+ if (const char *Prefix =
+ llvm::Triple::getArchTypePrefix(Target.getTriple().getArch()))
+ IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);
+
+ if (IntrinsicID != Intrinsic::not_intrinsic) {
+ SmallVector<Value*, 16> Args;
+
+ Function *F = CGM.getIntrinsic(IntrinsicID);
+ const llvm::FunctionType *FTy = F->getFunctionType();
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
+ Value *ArgValue = EmitScalarExpr(E->getArg(i));
+
+ // If the intrinsic arg type is different from the builtin arg type
+ // we need to do a bit cast.
+ const llvm::Type *PTy = FTy->getParamType(i);
+ if (PTy != ArgValue->getType()) {
+ assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
+ "Must be able to losslessly bit cast to param");
+ ArgValue = Builder.CreateBitCast(ArgValue, PTy);
+ }
+
+ Args.push_back(ArgValue);
+ }
+
+ Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size());
+ QualType BuiltinRetType = E->getType();
+
+ const llvm::Type *RetTy = llvm::Type::getVoidTy(VMContext);
+ if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);
+
+ if (RetTy != V->getType()) {
+ assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
+ "Must be able to losslessly bit cast result type");
+ V = Builder.CreateBitCast(V, RetTy);
+ }
+
+ return RValue::get(V);
+ }
+
+ // See if we have a target specific builtin that needs to be lowered.
+ if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
+ return RValue::get(V);
+
+ ErrorUnsupported(E, "builtin function");
+
+ // Unknown builtin, for now just dump it out and return undef.
+ if (hasAggregateLLVMType(E->getType()))
+ return RValue::getAggregate(CreateMemTemp(E->getType()));
+ return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
+}
+
+Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ switch (Target.getTriple().getArch()) {
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ return EmitARMBuiltinExpr(BuiltinID, E);
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ return EmitX86BuiltinExpr(BuiltinID, E);
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ return EmitPPCBuiltinExpr(BuiltinID, E);
+ default:
+ return 0;
+ }
+}
+
+const llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type, bool q) {
+ switch (type) {
+ default: break;
+ case 0:
+ case 5: return llvm::VectorType::get(llvm::Type::getInt8Ty(C), 8 << (int)q);
+ case 6:
+ case 7:
+ case 1: return llvm::VectorType::get(llvm::Type::getInt16Ty(C),4 << (int)q);
+ case 2: return llvm::VectorType::get(llvm::Type::getInt32Ty(C),2 << (int)q);
+ case 3: return llvm::VectorType::get(llvm::Type::getInt64Ty(C),1 << (int)q);
+ case 4: return llvm::VectorType::get(llvm::Type::getFloatTy(C),2 << (int)q);
+ };
+ return 0;
+}
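+
+// For example, GetNeonType(C, 2, true) yields <4 x i32> and
+// GetNeonType(C, 4, false) yields <2 x float>; the 'q' flag simply doubles
+// the element count for the 128-bit "quad" variants.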
+
+Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C, bool widen) {
+ unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
+ if (widen)
+ nElts <<= 1;
+ SmallVector<Constant*, 16> Indices(nElts, C);
+ Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ return Builder.CreateShuffleVector(V, V, SV, "lane");
+}
+
+Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
+ const char *name, bool splat,
+ unsigned shift, bool rightshift) {
+ unsigned j = 0;
+ for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
+ ai != ae; ++ai, ++j)
+ if (shift > 0 && shift == j)
+ Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
+ else
+ Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
+
+ if (splat) {
+ Ops[j-1] = EmitNeonSplat(Ops[j-1], cast<Constant>(Ops[j]));
+ Ops.resize(j);
+ }
+ return Builder.CreateCall(F, Ops.begin(), Ops.end(), name);
+}
+
+Value *CodeGenFunction::EmitNeonShiftVector(Value *V, const llvm::Type *Ty,
+ bool neg) {
+ ConstantInt *CI = cast<ConstantInt>(V);
+ int SV = CI->getSExtValue();
+
+ const llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
+ llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
+ SmallVector<llvm::Constant*, 16> CV(VTy->getNumElements(), C);
+ return llvm::ConstantVector::get(CV.begin(), CV.size());
+}
+
+/// GetPointeeAlignment - Given an expression with a pointer type, find the
+/// alignment of the type referenced by the pointer. Skip over implicit
+/// casts.
+static Value *GetPointeeAlignment(CodeGenFunction &CGF, const Expr *Addr) {
+ unsigned Align = 1;
+ // Check if the type is a pointer. The implicit cast operand might not be.
+ while (Addr->getType()->isPointerType()) {
+ QualType PtTy = Addr->getType()->getPointeeType();
+ unsigned NewA = CGF.getContext().getTypeAlignInChars(PtTy).getQuantity();
+ if (NewA > Align)
+ Align = NewA;
+
+ // If the address is an implicit cast, repeat with the cast operand.
+ if (const ImplicitCastExpr *CastAddr = dyn_cast<ImplicitCastExpr>(Addr)) {
+ Addr = CastAddr->getSubExpr();
+ continue;
+ }
+ break;
+ }
+ return llvm::ConstantInt::get(CGF.Int32Ty, Align);
+}
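+
+// For example (illustrative only), for an expression of type 'double *' this
+// returns an i32 constant 8 on typical targets: the pointee alignment in
+// characters, taking the largest alignment found while looking through
+// implicit casts.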
+
+Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ if (BuiltinID == ARM::BI__clear_cache) {
+ const FunctionDecl *FD = E->getDirectCallee();
+ Value *a = EmitScalarExpr(E->getArg(0));
+ Value *b = EmitScalarExpr(E->getArg(1));
+ const llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
+ const llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
+ llvm::StringRef Name = FD->getName();
+ return Builder.CreateCall2(CGM.CreateRuntimeFunction(FTy, Name),
+ a, b);
+ }
+
+ llvm::SmallVector<Value*, 4> Ops;
+ for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+ llvm::APSInt Result;
+ const Expr *Arg = E->getArg(E->getNumArgs()-1);
+ if (!Arg->isIntegerConstantExpr(Result, getContext()))
+ return 0;
+
+ if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
+ BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
+ // Determine the overloaded type of this builtin.
+ const llvm::Type *Ty;
+ if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
+ Ty = llvm::Type::getFloatTy(VMContext);
+ else
+ Ty = llvm::Type::getDoubleTy(VMContext);
+
+ // Determine whether this is an unsigned conversion or not.
+ bool usgn = Result.getZExtValue() == 1;
+ unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
+
+ // Call the appropriate intrinsic.
+ Function *F = CGM.getIntrinsic(Int, &Ty, 1);
+ return Builder.CreateCall(F, Ops.begin(), Ops.end(), "vcvtr");
+ }
+
+ // Determine the type of this overloaded NEON intrinsic.
+ unsigned type = Result.getZExtValue();
+ bool usgn = type & 0x08;
+ bool quad = type & 0x10;
+ bool poly = (type & 0x7) == 5 || (type & 0x7) == 6;
+ bool splat = false;
+
+ const llvm::VectorType *VTy = GetNeonType(VMContext, type & 0x7, quad);
+ const llvm::Type *Ty = VTy;
+ if (!Ty)
+ return 0;
+
+ unsigned Int;
+ switch (BuiltinID) {
+ default: return 0;
+ case ARM::BI__builtin_neon_vaba_v:
+ case ARM::BI__builtin_neon_vabaq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ SmallVector<Value*, 2> Args;
+ Args.push_back(Ops[1]);
+ Args.push_back(Ops[2]);
+ Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
+ Ops[1] = EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Args, "vaba");
+ return Builder.CreateAdd(Ops[0], Ops[1], "vaba");
+ }
+ case ARM::BI__builtin_neon_vabal_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ SmallVector<Value*, 2> Args;
+ Args.push_back(Ops[1]);
+ Args.push_back(Ops[2]);
+ Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[1] = EmitNeonCall(CGM.getIntrinsic(Int, &DTy, 1), Args, "vabal");
+ Ops[1] = Builder.CreateZExt(Ops[1], Ty);
+ return Builder.CreateAdd(Ops[0], Ops[1], "vabal");
+ }
+ case ARM::BI__builtin_neon_vabd_v:
+ case ARM::BI__builtin_neon_vabdq_v:
+ Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vabd");
+ case ARM::BI__builtin_neon_vabdl_v: {
+ Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, &DTy, 1), Ops, "vabdl");
+ return Builder.CreateZExt(Ops[0], Ty, "vabdl");
+ }
+ case ARM::BI__builtin_neon_vabs_v:
+ case ARM::BI__builtin_neon_vabsq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, &Ty, 1),
+ Ops, "vabs");
+ case ARM::BI__builtin_neon_vaddhn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vaddhn, &Ty, 1),
+ Ops, "vaddhn");
+ case ARM::BI__builtin_neon_vaddl_v: {
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
+ if (usgn) {
+ Ops[0] = Builder.CreateZExt(Ops[0], Ty);
+ Ops[1] = Builder.CreateZExt(Ops[1], Ty);
+ } else {
+ Ops[0] = Builder.CreateSExt(Ops[0], Ty);
+ Ops[1] = Builder.CreateSExt(Ops[1], Ty);
+ }
+ return Builder.CreateAdd(Ops[0], Ops[1], "vaddl");
+ }
+ case ARM::BI__builtin_neon_vaddw_v: {
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
+ if (usgn)
+ Ops[1] = Builder.CreateZExt(Ops[1], Ty);
+ else
+ Ops[1] = Builder.CreateSExt(Ops[1], Ty);
+ return Builder.CreateAdd(Ops[0], Ops[1], "vaddw");
+ }
+ case ARM::BI__builtin_neon_vcale_v:
+ std::swap(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vcage_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacged, &Ty, 1);
+ return EmitNeonCall(F, Ops, "vcage");
+ }
+ case ARM::BI__builtin_neon_vcaleq_v:
+ std::swap(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vcageq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq, &Ty, 1);
+ return EmitNeonCall(F, Ops, "vcage");
+ }
+ case ARM::BI__builtin_neon_vcalt_v:
+ std::swap(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vcagt_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtd, &Ty, 1);
+ return EmitNeonCall(F, Ops, "vcagt");
+ }
+ case ARM::BI__builtin_neon_vcaltq_v:
+ std::swap(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vcagtq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq, &Ty, 1);
+ return EmitNeonCall(F, Ops, "vcagt");
+ }
+ case ARM::BI__builtin_neon_vcls_v:
+ case ARM::BI__builtin_neon_vclsq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcls, &Ty, 1);
+ return EmitNeonCall(F, Ops, "vcls");
+ }
+ case ARM::BI__builtin_neon_vclz_v:
+ case ARM::BI__builtin_neon_vclzq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vclz, &Ty, 1);
+ return EmitNeonCall(F, Ops, "vclz");
+ }
+ case ARM::BI__builtin_neon_vcnt_v:
+ case ARM::BI__builtin_neon_vcntq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcnt, &Ty, 1);
+ return EmitNeonCall(F, Ops, "vcnt");
+ }
+ // FIXME: intrinsics for f16<->f32 convert missing from ARM target.
+ case ARM::BI__builtin_neon_vcvt_f32_v:
+ case ARM::BI__builtin_neon_vcvtq_f32_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ty = GetNeonType(VMContext, 4, quad);
+ return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
+ : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
+ }
+ case ARM::BI__builtin_neon_vcvt_s32_v:
+ case ARM::BI__builtin_neon_vcvt_u32_v:
+ case ARM::BI__builtin_neon_vcvtq_s32_v:
+ case ARM::BI__builtin_neon_vcvtq_u32_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(VMContext, 4, quad));
+ return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
+ : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
+ }
+ case ARM::BI__builtin_neon_vcvt_n_f32_v:
+ case ARM::BI__builtin_neon_vcvtq_n_f32_v: {
+ const llvm::Type *Tys[2] = { GetNeonType(VMContext, 4, quad), Ty };
+ Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp : Intrinsic::arm_neon_vcvtfxs2fp;
+ Function *F = CGM.getIntrinsic(Int, Tys, 2);
+ return EmitNeonCall(F, Ops, "vcvt_n");
+ }
+ case ARM::BI__builtin_neon_vcvt_n_s32_v:
+ case ARM::BI__builtin_neon_vcvt_n_u32_v:
+ case ARM::BI__builtin_neon_vcvtq_n_s32_v:
+ case ARM::BI__builtin_neon_vcvtq_n_u32_v: {
+ const llvm::Type *Tys[2] = { Ty, GetNeonType(VMContext, 4, quad) };
+ Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu : Intrinsic::arm_neon_vcvtfp2fxs;
+ Function *F = CGM.getIntrinsic(Int, Tys, 2);
+ return EmitNeonCall(F, Ops, "vcvt_n");
+ }
+ case ARM::BI__builtin_neon_vdup_lane_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return EmitNeonSplat(Ops[0], cast<Constant>(Ops[1]));
+ case ARM::BI__builtin_neon_vdupq_lane_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return EmitNeonSplat(Ops[0], cast<Constant>(Ops[1]), true);
+ case ARM::BI__builtin_neon_vext_v:
+ case ARM::BI__builtin_neon_vextq_v: {
+ ConstantInt *C = dyn_cast<ConstantInt>(Ops[2]);
+ int CV = C->getSExtValue();
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+ Indices.push_back(ConstantInt::get(Int32Ty, i+CV));
+
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
+ }
+ case ARM::BI__builtin_neon_vget_lane_i8:
+ case ARM::BI__builtin_neon_vget_lane_i16:
+ case ARM::BI__builtin_neon_vget_lane_i32:
+ case ARM::BI__builtin_neon_vget_lane_i64:
+ case ARM::BI__builtin_neon_vget_lane_f32:
+ case ARM::BI__builtin_neon_vgetq_lane_i8:
+ case ARM::BI__builtin_neon_vgetq_lane_i16:
+ case ARM::BI__builtin_neon_vgetq_lane_i32:
+ case ARM::BI__builtin_neon_vgetq_lane_i64:
+ case ARM::BI__builtin_neon_vgetq_lane_f32:
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vget_lane");
+ case ARM::BI__builtin_neon_vhadd_v:
+ case ARM::BI__builtin_neon_vhaddq_v:
+ Int = usgn ? Intrinsic::arm_neon_vhaddu : Intrinsic::arm_neon_vhadds;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vhadd");
+ case ARM::BI__builtin_neon_vhsub_v:
+ case ARM::BI__builtin_neon_vhsubq_v:
+ Int = usgn ? Intrinsic::arm_neon_vhsubu : Intrinsic::arm_neon_vhsubs;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vhsub");
+ case ARM::BI__builtin_neon_vld1_v:
+ case ARM::BI__builtin_neon_vld1q_v:
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, &Ty, 1),
+ Ops, "vld1");
+ case ARM::BI__builtin_neon_vld1_lane_v:
+ case ARM::BI__builtin_neon_vld1q_lane_v:
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ty = llvm::PointerType::getUnqual(VTy->getElementType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[0] = Builder.CreateLoad(Ops[0]);
+ return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
+ case ARM::BI__builtin_neon_vld1_dup_v:
+ case ARM::BI__builtin_neon_vld1q_dup_v: {
+ Value *V = UndefValue::get(Ty);
+ Ty = llvm::PointerType::getUnqual(VTy->getElementType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[0] = Builder.CreateLoad(Ops[0]);
+ llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
+ Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
+ return EmitNeonSplat(Ops[0], CI);
+ }
+ case ARM::BI__builtin_neon_vld2_v:
+ case ARM::BI__builtin_neon_vld2q_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, &Ty, 1);
+ Value *Align = GetPointeeAlignment(*this, E->getArg(1));
+ Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld3_v:
+ case ARM::BI__builtin_neon_vld3q_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, &Ty, 1);
+ Value *Align = GetPointeeAlignment(*this, E->getArg(1));
+ Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld4_v:
+ case ARM::BI__builtin_neon_vld4q_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, &Ty, 1);
+ Value *Align = GetPointeeAlignment(*this, E->getArg(1));
+ Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld2_lane_v:
+ case ARM::BI__builtin_neon_vld2q_lane_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, &Ty, 1);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
+ Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld2_lane");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld3_lane_v:
+ case ARM::BI__builtin_neon_vld3q_lane_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3lane, &Ty, 1);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
+ Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
+ Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld3_lane");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld4_lane_v:
+ case ARM::BI__builtin_neon_vld4q_lane_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4lane, &Ty, 1);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
+ Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
+ Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
+    Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld4_lane");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld2_dup_v:
+ case ARM::BI__builtin_neon_vld3_dup_v:
+ case ARM::BI__builtin_neon_vld4_dup_v: {
+    switch (BuiltinID) {
+    case ARM::BI__builtin_neon_vld2_dup_v:
+      Int = Intrinsic::arm_neon_vld2lane;
+      break;
+    case ARM::BI__builtin_neon_vld3_dup_v:
+      Int = Intrinsic::arm_neon_vld3lane;
+      break;
+    case ARM::BI__builtin_neon_vld4_dup_v:
+      Int = Intrinsic::arm_neon_vld4lane;
+      break;
+    default: assert(0 && "unknown vld_dup intrinsic?");
+    }
+ Function *F = CGM.getIntrinsic(Int, &Ty, 1);
+ const llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
+
+ SmallVector<Value*, 6> Args;
+ Args.push_back(Ops[1]);
+ Args.append(STy->getNumElements(), UndefValue::get(Ty));
+
+ llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
+ Args.push_back(CI);
+ Args.push_back(GetPointeeAlignment(*this, E->getArg(1)));
+
+ Ops[1] = Builder.CreateCall(F, Args.begin(), Args.end(), "vld_dup");
+    // Splat lane 0 to all elements in each vector of the result.
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ Value *Val = Builder.CreateExtractValue(Ops[1], i);
+ Value *Elt = Builder.CreateBitCast(Val, Ty);
+ Elt = EmitNeonSplat(Elt, CI);
+ Elt = Builder.CreateBitCast(Elt, Val->getType());
+ Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
+ }
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vmax_v:
+ case ARM::BI__builtin_neon_vmaxq_v:
+ Int = usgn ? Intrinsic::arm_neon_vmaxu : Intrinsic::arm_neon_vmaxs;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmax");
+ case ARM::BI__builtin_neon_vmin_v:
+ case ARM::BI__builtin_neon_vminq_v:
+ Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmin");
+ case ARM::BI__builtin_neon_vmlal_lane_v: {
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[2] = Builder.CreateBitCast(Ops[2], DTy);
+ Ops[2] = EmitNeonSplat(Ops[2], cast<Constant>(Ops[3]));
+ }
+ case ARM::BI__builtin_neon_vmlal_v: {
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
+ Ops[2] = Builder.CreateBitCast(Ops[2], DTy);
+ if (usgn) {
+ Ops[1] = Builder.CreateZExt(Ops[1], Ty);
+ Ops[2] = Builder.CreateZExt(Ops[2], Ty);
+ } else {
+ Ops[1] = Builder.CreateSExt(Ops[1], Ty);
+ Ops[2] = Builder.CreateSExt(Ops[2], Ty);
+ }
+ Ops[1] = Builder.CreateMul(Ops[1], Ops[2]);
+ return Builder.CreateAdd(Ops[0], Ops[1], "vmlal");
+ }
+ case ARM::BI__builtin_neon_vmlsl_lane_v: {
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[2] = Builder.CreateBitCast(Ops[2], DTy);
+ Ops[2] = EmitNeonSplat(Ops[2], cast<Constant>(Ops[3]));
+ }
+ case ARM::BI__builtin_neon_vmlsl_v: {
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
+ Ops[2] = Builder.CreateBitCast(Ops[2], DTy);
+ if (usgn) {
+ Ops[1] = Builder.CreateZExt(Ops[1], Ty);
+ Ops[2] = Builder.CreateZExt(Ops[2], Ty);
+ } else {
+ Ops[1] = Builder.CreateSExt(Ops[1], Ty);
+ Ops[2] = Builder.CreateSExt(Ops[2], Ty);
+ }
+ Ops[1] = Builder.CreateMul(Ops[1], Ops[2]);
+ return Builder.CreateSub(Ops[0], Ops[1], "vmlsl");
+ }
+ case ARM::BI__builtin_neon_vmovl_v: {
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
+ if (usgn)
+ return Builder.CreateZExt(Ops[0], Ty, "vmovl");
+ return Builder.CreateSExt(Ops[0], Ty, "vmovl");
+ }
+ case ARM::BI__builtin_neon_vmovn_v: {
+ const llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
+ return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
+ }
+ case ARM::BI__builtin_neon_vmull_lane_v: {
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
+ Ops[1] = EmitNeonSplat(Ops[1], cast<Constant>(Ops[2]));
+ }
+ case ARM::BI__builtin_neon_vmull_v: {
+ if (poly)
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmullp, &Ty, 1),
+ Ops, "vmull");
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
+ if (usgn) {
+ Ops[0] = Builder.CreateZExt(Ops[0], Ty);
+ Ops[1] = Builder.CreateZExt(Ops[1], Ty);
+ } else {
+ Ops[0] = Builder.CreateSExt(Ops[0], Ty);
+ Ops[1] = Builder.CreateSExt(Ops[1], Ty);
+ }
+ return Builder.CreateMul(Ops[0], Ops[1], "vmull");
+ }
+ case ARM::BI__builtin_neon_vpadal_v:
+ case ARM::BI__builtin_neon_vpadalq_v:
+ Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpadal");
+ case ARM::BI__builtin_neon_vpadd_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, &Ty, 1),
+ Ops, "vpadd");
+ case ARM::BI__builtin_neon_vpaddl_v:
+ case ARM::BI__builtin_neon_vpaddlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpaddl");
+ case ARM::BI__builtin_neon_vpmax_v:
+ Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpmax");
+ case ARM::BI__builtin_neon_vpmin_v:
+ Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpmin");
+ case ARM::BI__builtin_neon_vqabs_v:
+ case ARM::BI__builtin_neon_vqabsq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqabs, &Ty, 1),
+ Ops, "vqabs");
+ case ARM::BI__builtin_neon_vqadd_v:
+ case ARM::BI__builtin_neon_vqaddq_v:
+ Int = usgn ? Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqadd");
+ case ARM::BI__builtin_neon_vqdmlal_lane_v:
+ splat = true;
+ case ARM::BI__builtin_neon_vqdmlal_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlal, &Ty, 1),
+ Ops, "vqdmlal", splat);
+ case ARM::BI__builtin_neon_vqdmlsl_lane_v:
+ splat = true;
+ case ARM::BI__builtin_neon_vqdmlsl_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlsl, &Ty, 1),
+ Ops, "vqdmlsl", splat);
+ case ARM::BI__builtin_neon_vqdmulh_lane_v:
+ case ARM::BI__builtin_neon_vqdmulhq_lane_v:
+ splat = true;
+ case ARM::BI__builtin_neon_vqdmulh_v:
+ case ARM::BI__builtin_neon_vqdmulhq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, &Ty, 1),
+ Ops, "vqdmulh", splat);
+ case ARM::BI__builtin_neon_vqdmull_lane_v:
+ splat = true;
+ case ARM::BI__builtin_neon_vqdmull_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, &Ty, 1),
+ Ops, "vqdmull", splat);
+ case ARM::BI__builtin_neon_vqmovn_v:
+ Int = usgn ? Intrinsic::arm_neon_vqmovnu : Intrinsic::arm_neon_vqmovns;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqmovn");
+  case ARM::BI__builtin_neon_vqmovun_v:
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, &Ty, 1),
+                        Ops, "vqmovun");
+ case ARM::BI__builtin_neon_vqneg_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, &Ty, 1),
+ Ops, "vqneg");
+ case ARM::BI__builtin_neon_vqrdmulh_lane_v:
+ case ARM::BI__builtin_neon_vqrdmulhq_lane_v:
+ splat = true;
+ case ARM::BI__builtin_neon_vqrdmulh_v:
+ case ARM::BI__builtin_neon_vqrdmulhq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, &Ty, 1),
+ Ops, "vqrdmulh", splat);
+ case ARM::BI__builtin_neon_vqrshl_v:
+ case ARM::BI__builtin_neon_vqrshlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshl");
+ case ARM::BI__builtin_neon_vqrshrn_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshrn_n", false,
+ 1, true);
+ case ARM::BI__builtin_neon_vqrshrun_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, &Ty, 1),
+ Ops, "vqrshrun_n", false, 1, true);
+ case ARM::BI__builtin_neon_vqshl_v:
+ case ARM::BI__builtin_neon_vqshlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl");
+ case ARM::BI__builtin_neon_vqshl_n_v:
+ case ARM::BI__builtin_neon_vqshlq_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl_n", false,
+ 1, false);
+ case ARM::BI__builtin_neon_vqshlu_n_v:
+ case ARM::BI__builtin_neon_vqshluq_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, &Ty, 1),
+ Ops, "vqshlu", 1, false);
+ case ARM::BI__builtin_neon_vqshrn_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshrn_n", false,
+ 1, true);
+ case ARM::BI__builtin_neon_vqshrun_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, &Ty, 1),
+ Ops, "vqshrun_n", false, 1, true);
+ case ARM::BI__builtin_neon_vqsub_v:
+ case ARM::BI__builtin_neon_vqsubq_v:
+ Int = usgn ? Intrinsic::arm_neon_vqsubu : Intrinsic::arm_neon_vqsubs;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqsub");
+ case ARM::BI__builtin_neon_vraddhn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vraddhn, &Ty, 1),
+ Ops, "vraddhn");
+ case ARM::BI__builtin_neon_vrecpe_v:
+ case ARM::BI__builtin_neon_vrecpeq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, &Ty, 1),
+ Ops, "vrecpe");
+ case ARM::BI__builtin_neon_vrecps_v:
+ case ARM::BI__builtin_neon_vrecpsq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecps, &Ty, 1),
+ Ops, "vrecps");
+ case ARM::BI__builtin_neon_vrhadd_v:
+ case ARM::BI__builtin_neon_vrhaddq_v:
+ Int = usgn ? Intrinsic::arm_neon_vrhaddu : Intrinsic::arm_neon_vrhadds;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrhadd");
+ case ARM::BI__builtin_neon_vrshl_v:
+ case ARM::BI__builtin_neon_vrshlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshl");
+ case ARM::BI__builtin_neon_vrshrn_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, &Ty, 1),
+ Ops, "vrshrn_n", false, 1, true);
+ case ARM::BI__builtin_neon_vrshr_n_v:
+ case ARM::BI__builtin_neon_vrshrq_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshr_n", false,
+ 1, true);
+ case ARM::BI__builtin_neon_vrsqrte_v:
+ case ARM::BI__builtin_neon_vrsqrteq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, &Ty, 1),
+ Ops, "vrsqrte");
+ case ARM::BI__builtin_neon_vrsqrts_v:
+ case ARM::BI__builtin_neon_vrsqrtsq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrts, &Ty, 1),
+ Ops, "vrsqrts");
+ case ARM::BI__builtin_neon_vrsra_n_v:
+ case ARM::BI__builtin_neon_vrsraq_n_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
+ Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
+ Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, &Ty, 1), Ops[1], Ops[2]);
+ return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
+ case ARM::BI__builtin_neon_vrsubhn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, &Ty, 1),
+ Ops, "vrsubhn");
+ case ARM::BI__builtin_neon_vset_lane_i8:
+ case ARM::BI__builtin_neon_vset_lane_i16:
+ case ARM::BI__builtin_neon_vset_lane_i32:
+ case ARM::BI__builtin_neon_vset_lane_i64:
+ case ARM::BI__builtin_neon_vset_lane_f32:
+ case ARM::BI__builtin_neon_vsetq_lane_i8:
+ case ARM::BI__builtin_neon_vsetq_lane_i16:
+ case ARM::BI__builtin_neon_vsetq_lane_i32:
+ case ARM::BI__builtin_neon_vsetq_lane_i64:
+ case ARM::BI__builtin_neon_vsetq_lane_f32:
+ Ops.push_back(EmitScalarExpr(E->getArg(2)));
+ return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
+ case ARM::BI__builtin_neon_vshl_v:
+ case ARM::BI__builtin_neon_vshlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshl");
+ case ARM::BI__builtin_neon_vshll_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vshiftlu : Intrinsic::arm_neon_vshiftls;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshll", false, 1);
+ case ARM::BI__builtin_neon_vshl_n_v:
+ case ARM::BI__builtin_neon_vshlq_n_v:
+ Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
+ return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1], "vshl_n");
+ case ARM::BI__builtin_neon_vshrn_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, &Ty, 1),
+ Ops, "vshrn_n", false, 1, true);
+ case ARM::BI__builtin_neon_vshr_n_v:
+ case ARM::BI__builtin_neon_vshrq_n_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
+ if (usgn)
+ return Builder.CreateLShr(Ops[0], Ops[1], "vshr_n");
+ else
+ return Builder.CreateAShr(Ops[0], Ops[1], "vshr_n");
+ case ARM::BI__builtin_neon_vsri_n_v:
+ case ARM::BI__builtin_neon_vsriq_n_v:
+ poly = true;
+ case ARM::BI__builtin_neon_vsli_n_v:
+ case ARM::BI__builtin_neon_vsliq_n_v:
+ Ops[2] = EmitNeonShiftVector(Ops[2], Ty, poly);
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, &Ty, 1),
+ Ops, "vsli_n");
+ case ARM::BI__builtin_neon_vsra_n_v:
+ case ARM::BI__builtin_neon_vsraq_n_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = EmitNeonShiftVector(Ops[2], Ty, false);
+ if (usgn)
+ Ops[1] = Builder.CreateLShr(Ops[1], Ops[2], "vsra_n");
+ else
+ Ops[1] = Builder.CreateAShr(Ops[1], Ops[2], "vsra_n");
+ return Builder.CreateAdd(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vst1_v:
+ case ARM::BI__builtin_neon_vst1q_v:
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, &Ty, 1),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst1_lane_v:
+ case ARM::BI__builtin_neon_vst1q_lane_v:
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ return Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty));
+ case ARM::BI__builtin_neon_vst2_v:
+ case ARM::BI__builtin_neon_vst2q_v:
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, &Ty, 1),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst2_lane_v:
+ case ARM::BI__builtin_neon_vst2q_lane_v:
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, &Ty, 1),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst3_v:
+ case ARM::BI__builtin_neon_vst3q_v:
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, &Ty, 1),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst3_lane_v:
+ case ARM::BI__builtin_neon_vst3q_lane_v:
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, &Ty, 1),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst4_v:
+ case ARM::BI__builtin_neon_vst4q_v:
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, &Ty, 1),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst4_lane_v:
+ case ARM::BI__builtin_neon_vst4q_lane_v:
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, &Ty, 1),
+ Ops, "");
+ case ARM::BI__builtin_neon_vsubhn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vsubhn, &Ty, 1),
+ Ops, "vsubhn");
+ case ARM::BI__builtin_neon_vsubl_v: {
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
+ if (usgn) {
+ Ops[0] = Builder.CreateZExt(Ops[0], Ty);
+ Ops[1] = Builder.CreateZExt(Ops[1], Ty);
+ } else {
+ Ops[0] = Builder.CreateSExt(Ops[0], Ty);
+ Ops[1] = Builder.CreateSExt(Ops[1], Ty);
+ }
+ return Builder.CreateSub(Ops[0], Ops[1], "vsubl");
+ }
+ case ARM::BI__builtin_neon_vsubw_v: {
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
+ if (usgn)
+ Ops[1] = Builder.CreateZExt(Ops[1], Ty);
+ else
+ Ops[1] = Builder.CreateSExt(Ops[1], Ty);
+ return Builder.CreateSub(Ops[0], Ops[1], "vsubw");
+ }
+ case ARM::BI__builtin_neon_vtbl1_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
+ Ops, "vtbl1");
+ case ARM::BI__builtin_neon_vtbl2_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
+ Ops, "vtbl2");
+ case ARM::BI__builtin_neon_vtbl3_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
+ Ops, "vtbl3");
+ case ARM::BI__builtin_neon_vtbl4_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
+ Ops, "vtbl4");
+ case ARM::BI__builtin_neon_vtbx1_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
+ Ops, "vtbx1");
+ case ARM::BI__builtin_neon_vtbx2_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
+ Ops, "vtbx2");
+ case ARM::BI__builtin_neon_vtbx3_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
+ Ops, "vtbx3");
+ case ARM::BI__builtin_neon_vtbx4_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
+ Ops, "vtbx4");
+ case ARM::BI__builtin_neon_vtst_v:
+ case ARM::BI__builtin_neon_vtstq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
+ Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
+ ConstantAggregateZero::get(Ty));
+ return Builder.CreateSExt(Ops[0], Ty, "vtst");
+ }
+ case ARM::BI__builtin_neon_vtrn_v:
+ case ARM::BI__builtin_neon_vtrnq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Value *SV;
+
+ for (unsigned vi = 0; vi != 2; ++vi) {
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+ Indices.push_back(ConstantInt::get(Int32Ty, i+vi));
+ Indices.push_back(ConstantInt::get(Int32Ty, i+e+vi));
+ }
+ Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
+ SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
+ SV = Builder.CreateStore(SV, Addr);
+ }
+ return SV;
+ }
+ case ARM::BI__builtin_neon_vuzp_v:
+ case ARM::BI__builtin_neon_vuzpq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Value *SV;
+
+ for (unsigned vi = 0; vi != 2; ++vi) {
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+ Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));
+
+ Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
+ SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
+ SV = Builder.CreateStore(SV, Addr);
+ }
+ return SV;
+ }
+ case ARM::BI__builtin_neon_vzip_v:
+ case ARM::BI__builtin_neon_vzipq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Value *SV;
+
+ for (unsigned vi = 0; vi != 2; ++vi) {
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+ Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
+ Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
+ }
+ Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
+ SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
+ SV = Builder.CreateStore(SV, Addr);
+ }
+ return SV;
+ }
+ }
+}
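For orientation, a user-level NEON call reaches EmitARMBuiltinExpr through the generic "_v" builtins with a trailing type-discriminator constant. A minimal sketch of the widening-add path handled by the vaddl_v case above, assuming <arm_neon.h> and a NEON-enabled ARM target:

    #include <arm_neon.h>

    // vaddl_s16 expands to __builtin_neon_vaddl_v; the vaddl_v case above
    // bitcasts both operands to the narrow vector type, sign-extends them to
    // the wide type, and emits a plain IR 'add' instead of a target intrinsic.
    int32x4_t widening_add(int16x4_t a, int16x4_t b) {
      return vaddl_s16(a, b);
    }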
+
+Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+
+ llvm::SmallVector<Value*, 4> Ops;
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+ switch (BuiltinID) {
+ default: return 0;
+ case X86::BI__builtin_ia32_pslldi128:
+ case X86::BI__builtin_ia32_psllqi128:
+ case X86::BI__builtin_ia32_psllwi128:
+ case X86::BI__builtin_ia32_psradi128:
+ case X86::BI__builtin_ia32_psrawi128:
+ case X86::BI__builtin_ia32_psrldi128:
+ case X86::BI__builtin_ia32_psrlqi128:
+ case X86::BI__builtin_ia32_psrlwi128: {
+ Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
+ const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
+ llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
+ Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
+ Ops[1], Zero, "insert");
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast");
+ const char *name = 0;
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
+ switch (BuiltinID) {
+ default: assert(0 && "Unsupported shift intrinsic!");
+ case X86::BI__builtin_ia32_pslldi128:
+ name = "pslldi";
+ ID = Intrinsic::x86_sse2_psll_d;
+ break;
+ case X86::BI__builtin_ia32_psllqi128:
+ name = "psllqi";
+ ID = Intrinsic::x86_sse2_psll_q;
+ break;
+ case X86::BI__builtin_ia32_psllwi128:
+ name = "psllwi";
+ ID = Intrinsic::x86_sse2_psll_w;
+ break;
+ case X86::BI__builtin_ia32_psradi128:
+ name = "psradi";
+ ID = Intrinsic::x86_sse2_psra_d;
+ break;
+ case X86::BI__builtin_ia32_psrawi128:
+ name = "psrawi";
+ ID = Intrinsic::x86_sse2_psra_w;
+ break;
+ case X86::BI__builtin_ia32_psrldi128:
+ name = "psrldi";
+ ID = Intrinsic::x86_sse2_psrl_d;
+ break;
+ case X86::BI__builtin_ia32_psrlqi128:
+ name = "psrlqi";
+ ID = Intrinsic::x86_sse2_psrl_q;
+ break;
+ case X86::BI__builtin_ia32_psrlwi128:
+ name = "psrlwi";
+ ID = Intrinsic::x86_sse2_psrl_w;
+ break;
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ }
+ case X86::BI__builtin_ia32_pslldi:
+ case X86::BI__builtin_ia32_psllqi:
+ case X86::BI__builtin_ia32_psllwi:
+ case X86::BI__builtin_ia32_psradi:
+ case X86::BI__builtin_ia32_psrawi:
+ case X86::BI__builtin_ia32_psrldi:
+ case X86::BI__builtin_ia32_psrlqi:
+ case X86::BI__builtin_ia32_psrlwi: {
+ Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
+ const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 1);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
+ const char *name = 0;
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
+ switch (BuiltinID) {
+ default: assert(0 && "Unsupported shift intrinsic!");
+ case X86::BI__builtin_ia32_pslldi:
+ name = "pslldi";
+ ID = Intrinsic::x86_mmx_psll_d;
+ break;
+ case X86::BI__builtin_ia32_psllqi:
+ name = "psllqi";
+ ID = Intrinsic::x86_mmx_psll_q;
+ break;
+ case X86::BI__builtin_ia32_psllwi:
+ name = "psllwi";
+ ID = Intrinsic::x86_mmx_psll_w;
+ break;
+ case X86::BI__builtin_ia32_psradi:
+ name = "psradi";
+ ID = Intrinsic::x86_mmx_psra_d;
+ break;
+ case X86::BI__builtin_ia32_psrawi:
+ name = "psrawi";
+ ID = Intrinsic::x86_mmx_psra_w;
+ break;
+ case X86::BI__builtin_ia32_psrldi:
+ name = "psrldi";
+ ID = Intrinsic::x86_mmx_psrl_d;
+ break;
+ case X86::BI__builtin_ia32_psrlqi:
+ name = "psrlqi";
+ ID = Intrinsic::x86_mmx_psrl_q;
+ break;
+ case X86::BI__builtin_ia32_psrlwi:
+ name = "psrlwi";
+ ID = Intrinsic::x86_mmx_psrl_w;
+ break;
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ }
+ case X86::BI__builtin_ia32_cmpps: {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpps");
+ }
+ case X86::BI__builtin_ia32_cmpss: {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ss);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss");
+ }
+ case X86::BI__builtin_ia32_ldmxcsr: {
+ const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ Value *One = llvm::ConstantInt::get(Int32Ty, 1);
+ Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
+ Builder.CreateStore(Ops[0], Tmp);
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
+ Builder.CreateBitCast(Tmp, PtrTy));
+ }
+ case X86::BI__builtin_ia32_stmxcsr: {
+ const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ Value *One = llvm::ConstantInt::get(Int32Ty, 1);
+ Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
+ One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
+ Builder.CreateBitCast(Tmp, PtrTy));
+ return Builder.CreateLoad(Tmp, "stmxcsr");
+ }
+ case X86::BI__builtin_ia32_cmppd: {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_pd);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmppd");
+ }
+ case X86::BI__builtin_ia32_cmpsd: {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_sd);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpsd");
+ }
+ case X86::BI__builtin_ia32_storehps:
+ case X86::BI__builtin_ia32_storelps: {
+ llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
+ llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
+
+    // cast val to v2i64
+ Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
+
+ // extract (0, 1)
+ unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
+ llvm::Value *Idx = llvm::ConstantInt::get(Int32Ty, Index);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");
+
+ // cast pointer to i64 & store
+ Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case X86::BI__builtin_ia32_palignr: {
+ unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+
+ // If palignr is shifting the pair of input vectors less than 9 bytes,
+ // emit a shuffle instruction.
+ if (shiftVal <= 8) {
+ llvm::SmallVector<llvm::Constant*, 8> Indices;
+ for (unsigned i = 0; i != 8; ++i)
+ Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
+
+ Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
+ }
+
+ // If palignr is shifting the pair of input vectors more than 8 but less
+ // than 16 bytes, emit a logical right shift of the destination.
+ if (shiftVal < 16) {
+ // MMX has these as 1 x i64 vectors for some odd optimization reasons.
+ const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);
+
+ Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+ Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);
+
+      // call the MMX psrl.q intrinsic with the remaining shift amount
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
+ }
+
+    // If palignr is shifting the pair of vectors 16 bytes or more, emit zero.
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ }
+ case X86::BI__builtin_ia32_palignr128: {
+ unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+
+ // If palignr is shifting the pair of input vectors less than 17 bytes,
+ // emit a shuffle instruction.
+ if (shiftVal <= 16) {
+ llvm::SmallVector<llvm::Constant*, 16> Indices;
+ for (unsigned i = 0; i != 16; ++i)
+ Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
+
+ Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
+ }
+
+ // If palignr is shifting the pair of input vectors more than 16 but less
+ // than 32 bytes, emit a logical right shift of the destination.
+ if (shiftVal < 32) {
+ const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
+
+ Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+ Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);
+
+      // call the SSE2 psrl.dq intrinsic with the remaining shift amount
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
+ }
+
+    // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ }
+ }
+}
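To make the immediate-count shift lowering above concrete, a small sketch assuming the usual <emmintrin.h> mapping of _mm_slli_epi32 onto __builtin_ia32_pslldi128:

    #include <emmintrin.h>

    // The pslldi128 case above zero-extends the i32 count to i64, inserts it
    // into element 0 of a <2 x i64> vector, bitcasts it to the operand type,
    // and calls llvm.x86.sse2.psll.d. The palignr cases follow the regime
    // split documented in their comments: a shufflevector for small byte
    // shifts, a logical right shift for mid-range shifts, and a constant zero
    // once the shift covers both inputs.
    __m128i shift_left_by_7(__m128i v) {
      return _mm_slli_epi32(v, 7);
    }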
+
+Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ llvm::SmallVector<Value*, 4> Ops;
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
+ switch (BuiltinID) {
+ default: return 0;
+
+ // vec_ld, vec_lvsl, vec_lvsr
+ case PPC::BI__builtin_altivec_lvx:
+ case PPC::BI__builtin_altivec_lvxl:
+ case PPC::BI__builtin_altivec_lvebx:
+ case PPC::BI__builtin_altivec_lvehx:
+ case PPC::BI__builtin_altivec_lvewx:
+ case PPC::BI__builtin_altivec_lvsl:
+ case PPC::BI__builtin_altivec_lvsr:
+ {
+ Ops[1] = Builder.CreateBitCast(Ops[1], llvm::Type::getInt8PtrTy(VMContext));
+
+ Ops[0] = Builder.CreateGEP(Ops[1], Ops[0], "tmp");
+ Ops.pop_back();
+
+ switch (BuiltinID) {
+ default: assert(0 && "Unsupported ld/lvsl/lvsr intrinsic!");
+ case PPC::BI__builtin_altivec_lvx:
+ ID = Intrinsic::ppc_altivec_lvx;
+ break;
+ case PPC::BI__builtin_altivec_lvxl:
+ ID = Intrinsic::ppc_altivec_lvxl;
+ break;
+ case PPC::BI__builtin_altivec_lvebx:
+ ID = Intrinsic::ppc_altivec_lvebx;
+ break;
+ case PPC::BI__builtin_altivec_lvehx:
+ ID = Intrinsic::ppc_altivec_lvehx;
+ break;
+ case PPC::BI__builtin_altivec_lvewx:
+ ID = Intrinsic::ppc_altivec_lvewx;
+ break;
+ case PPC::BI__builtin_altivec_lvsl:
+ ID = Intrinsic::ppc_altivec_lvsl;
+ break;
+ case PPC::BI__builtin_altivec_lvsr:
+ ID = Intrinsic::ppc_altivec_lvsr;
+ break;
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "");
+ }
+
+ // vec_st
+ case PPC::BI__builtin_altivec_stvx:
+ case PPC::BI__builtin_altivec_stvxl:
+ case PPC::BI__builtin_altivec_stvebx:
+ case PPC::BI__builtin_altivec_stvehx:
+ case PPC::BI__builtin_altivec_stvewx:
+ {
+ Ops[2] = Builder.CreateBitCast(Ops[2], llvm::Type::getInt8PtrTy(VMContext));
+ Ops[1] = Builder.CreateGEP(Ops[2], Ops[1], "tmp");
+ Ops.pop_back();
+
+ switch (BuiltinID) {
+ default: assert(0 && "Unsupported st intrinsic!");
+ case PPC::BI__builtin_altivec_stvx:
+ ID = Intrinsic::ppc_altivec_stvx;
+ break;
+ case PPC::BI__builtin_altivec_stvxl:
+ ID = Intrinsic::ppc_altivec_stvxl;
+ break;
+ case PPC::BI__builtin_altivec_stvebx:
+ ID = Intrinsic::ppc_altivec_stvebx;
+ break;
+ case PPC::BI__builtin_altivec_stvehx:
+ ID = Intrinsic::ppc_altivec_stvehx;
+ break;
+ case PPC::BI__builtin_altivec_stvewx:
+ ID = Intrinsic::ppc_altivec_stvewx;
+ break;
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "");
+ }
+ }
+ return 0;
+}
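A user-level sketch of the AltiVec load path above, assuming <altivec.h> and -maltivec; the builtin's byte offset is folded into the pointer with a GEP before the intrinsic call:

    #include <altivec.h>

    // vec_ld(offset, p) reaches EmitPPCBuiltinExpr as __builtin_altivec_lvx;
    // the code above bitcasts p to i8*, adds the byte offset, drops the offset
    // operand, and calls llvm.ppc.altivec.lvx on the combined address.
    vector signed int load_vec(int offset, const vector signed int *p) {
      return vec_ld(offset, p);
    }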
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
new file mode 100644
index 0000000..179716f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
@@ -0,0 +1,471 @@
+//===--- CGCXX.cpp - Emit LLVM Code for C++ code --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation.
+//
+//===----------------------------------------------------------------------===//
+
+// We might split this into multiple files if it gets too unwieldy
+
+#include "CGCXXABI.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/StringExtras.h"
+using namespace clang;
+using namespace CodeGen;
+
+/// Determines whether the given function has a trivial body that does
+/// not require any specific codegen.
+static bool HasTrivialBody(const FunctionDecl *FD) {
+ Stmt *S = FD->getBody();
+ if (!S)
+ return true;
+ if (isa<CompoundStmt>(S) && cast<CompoundStmt>(S)->body_empty())
+ return true;
+ return false;
+}
+
+/// Try to emit a base destructor as an alias to its primary
+/// base-class destructor.
+bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
+ if (!getCodeGenOpts().CXXCtorDtorAliases)
+ return true;
+
+ // If the destructor doesn't have a trivial body, we have to emit it
+ // separately.
+ if (!HasTrivialBody(D))
+ return true;
+
+ const CXXRecordDecl *Class = D->getParent();
+
+ // If we need to manipulate a VTT parameter, give up.
+ if (Class->getNumVBases()) {
+ // Extra Credit: passing extra parameters is perfectly safe
+    // in many calling conventions, so only bail out if the destructor's
+ // calling convention is nonstandard.
+ return true;
+ }
+
+ // If any fields have a non-trivial destructor, we have to emit it
+ // separately.
+ for (CXXRecordDecl::field_iterator I = Class->field_begin(),
+ E = Class->field_end(); I != E; ++I)
+ if (const RecordType *RT = (*I)->getType()->getAs<RecordType>())
+ if (!cast<CXXRecordDecl>(RT->getDecl())->hasTrivialDestructor())
+ return true;
+
+ // Try to find a unique base class with a non-trivial destructor.
+ const CXXRecordDecl *UniqueBase = 0;
+ for (CXXRecordDecl::base_class_const_iterator I = Class->bases_begin(),
+ E = Class->bases_end(); I != E; ++I) {
+
+ // We're in the base destructor, so skip virtual bases.
+ if (I->isVirtual()) continue;
+
+ // Skip base classes with trivial destructors.
+ const CXXRecordDecl *Base
+ = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ if (Base->hasTrivialDestructor()) continue;
+
+ // If we've already found a base class with a non-trivial
+ // destructor, give up.
+ if (UniqueBase) return true;
+ UniqueBase = Base;
+ }
+
+ // If we didn't find any bases with a non-trivial destructor, then
+ // the base destructor is actually effectively trivial, which can
+ // happen if it was needlessly user-defined or if there are virtual
+ // bases with non-trivial destructors.
+ if (!UniqueBase)
+ return true;
+
+ /// If we don't have a definition for the destructor yet, don't
+ /// emit. We can't emit aliases to declarations; that's just not
+ /// how aliases work.
+ const CXXDestructorDecl *BaseD = UniqueBase->getDestructor();
+ if (!BaseD->isImplicit() && !BaseD->hasBody())
+ return true;
+
+ // If the base is at a non-zero offset, give up.
+ const ASTRecordLayout &ClassLayout = Context.getASTRecordLayout(Class);
+ if (ClassLayout.getBaseClassOffset(UniqueBase) != 0)
+ return true;
+
+ return TryEmitDefinitionAsAlias(GlobalDecl(D, Dtor_Base),
+ GlobalDecl(BaseD, Dtor_Base));
+}
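A hedged illustration of the checks above: with -mconstructor-aliases, a hierarchy like the following would typically allow the base-object destructor of Derived to be emitted as an alias to Base's base-object destructor.

    // Derived's destructor has an empty body, Derived has no virtual bases and
    // no fields with non-trivial destructors, and its single non-trivial base
    // sits at offset zero. Both destructors are defined out of line so they
    // get external (non-weak) linkage, which TryEmitDefinitionAsAlias below
    // also requires.
    struct Base           { ~Base(); };
    struct Derived : Base { ~Derived(); };
    Base::~Base()       {}
    Derived::~Derived() {}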
+
+/// Try to emit a definition as a global alias for another definition.
+bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
+ GlobalDecl TargetDecl) {
+ if (!getCodeGenOpts().CXXCtorDtorAliases)
+ return true;
+
+  // The alias will use the linkage of the referent.  If we can't
+ // support aliases with that linkage, fail.
+ llvm::GlobalValue::LinkageTypes Linkage
+ = getFunctionLinkage(cast<FunctionDecl>(AliasDecl.getDecl()));
+
+ switch (Linkage) {
+ // We can definitely emit aliases to definitions with external linkage.
+ case llvm::GlobalValue::ExternalLinkage:
+ case llvm::GlobalValue::ExternalWeakLinkage:
+ break;
+
+ // Same with local linkage.
+ case llvm::GlobalValue::InternalLinkage:
+ case llvm::GlobalValue::PrivateLinkage:
+ case llvm::GlobalValue::LinkerPrivateLinkage:
+ break;
+
+ // We should try to support linkonce linkages.
+ case llvm::GlobalValue::LinkOnceAnyLinkage:
+ case llvm::GlobalValue::LinkOnceODRLinkage:
+ return true;
+
+ // Other linkages will probably never be supported.
+ default:
+ return true;
+ }
+
+ llvm::GlobalValue::LinkageTypes TargetLinkage
+ = getFunctionLinkage(cast<FunctionDecl>(TargetDecl.getDecl()));
+
+ if (llvm::GlobalValue::isWeakForLinker(TargetLinkage))
+ return true;
+
+ // Derive the type for the alias.
+ const llvm::PointerType *AliasType
+ = getTypes().GetFunctionType(AliasDecl)->getPointerTo();
+
+  // Find the referent.  Some aliases might require a bitcast, in
+ // which case the caller is responsible for ensuring the soundness
+ // of these semantics.
+ llvm::GlobalValue *Ref = cast<llvm::GlobalValue>(GetAddrOfGlobal(TargetDecl));
+ llvm::Constant *Aliasee = Ref;
+ if (Ref->getType() != AliasType)
+ Aliasee = llvm::ConstantExpr::getBitCast(Ref, AliasType);
+
+ // Create the alias with no name.
+ llvm::GlobalAlias *Alias =
+ new llvm::GlobalAlias(AliasType, Linkage, "", Aliasee, &getModule());
+
+ // Switch any previous uses to the alias.
+ llvm::StringRef MangledName = getMangledName(AliasDecl);
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (Entry) {
+ assert(Entry->isDeclaration() && "definition already exists for alias");
+ assert(Entry->getType() == AliasType &&
+ "declaration exists with different type");
+ Alias->takeName(Entry);
+ Entry->replaceAllUsesWith(Alias);
+ Entry->eraseFromParent();
+ } else {
+ Alias->setName(MangledName);
+ }
+
+ // Finally, set up the alias with its proper name and attributes.
+ SetCommonAttributes(AliasDecl.getDecl(), Alias);
+
+ return false;
+}
+
+void CodeGenModule::EmitCXXConstructors(const CXXConstructorDecl *D) {
+ // The constructor used for constructing this as a complete class;
+ // constucts the virtual bases, then calls the base constructor.
+ EmitGlobal(GlobalDecl(D, Ctor_Complete));
+
+ // The constructor used for constructing this as a base class;
+ // ignores virtual bases.
+ EmitGlobal(GlobalDecl(D, Ctor_Base));
+}
+
+void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *D,
+ CXXCtorType Type) {
+ // The complete constructor is equivalent to the base constructor
+ // for classes with no virtual bases. Try to emit it as an alias.
+ if (Type == Ctor_Complete &&
+ !D->getParent()->getNumVBases() &&
+ !TryEmitDefinitionAsAlias(GlobalDecl(D, Ctor_Complete),
+ GlobalDecl(D, Ctor_Base)))
+ return;
+
+ llvm::Function *Fn = cast<llvm::Function>(GetAddrOfCXXConstructor(D, Type));
+ setFunctionLinkage(D, Fn);
+
+ CodeGenFunction(*this).GenerateCode(GlobalDecl(D, Type), Fn);
+
+ SetFunctionDefinitionAttributes(D, Fn);
+ SetLLVMFunctionAttributesForDefinition(D, Fn);
+}
+
+llvm::GlobalValue *
+CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
+ CXXCtorType Type) {
+ GlobalDecl GD(D, Type);
+
+ llvm::StringRef Name = getMangledName(GD);
+ if (llvm::GlobalValue *V = GetGlobalValue(Name))
+ return V;
+
+ const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>();
+ const llvm::FunctionType *FTy =
+ getTypes().GetFunctionType(getTypes().getFunctionInfo(D, Type),
+ FPT->isVariadic());
+ return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FTy, GD));
+}
+
+void CodeGenModule::EmitCXXDestructors(const CXXDestructorDecl *D) {
+ // The destructor in a virtual table is always a 'deleting'
+ // destructor, which calls the complete destructor and then uses the
+ // appropriate operator delete.
+ if (D->isVirtual())
+ EmitGlobal(GlobalDecl(D, Dtor_Deleting));
+
+ // The destructor used for destructing this as a most-derived class;
+  // calls the base destructor and then destroys any virtual bases.
+ EmitGlobal(GlobalDecl(D, Dtor_Complete));
+
+ // The destructor used for destructing this as a base class; ignores
+ // virtual bases.
+ EmitGlobal(GlobalDecl(D, Dtor_Base));
+}
+
+void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType Type) {
+ // The complete destructor is equivalent to the base destructor for
+ // classes with no virtual bases, so try to emit it as an alias.
+ if (Type == Dtor_Complete &&
+ !D->getParent()->getNumVBases() &&
+ !TryEmitDefinitionAsAlias(GlobalDecl(D, Dtor_Complete),
+ GlobalDecl(D, Dtor_Base)))
+ return;
+
+ // The base destructor is equivalent to the base destructor of its
+ // base class if there is exactly one non-virtual base class with a
+ // non-trivial destructor, there are no fields with a non-trivial
+ // destructor, and the body of the destructor is trivial.
+ if (Type == Dtor_Base && !TryEmitBaseDestructorAsAlias(D))
+ return;
+
+ llvm::Function *Fn = cast<llvm::Function>(GetAddrOfCXXDestructor(D, Type));
+ setFunctionLinkage(D, Fn);
+
+ CodeGenFunction(*this).GenerateCode(GlobalDecl(D, Type), Fn);
+
+ SetFunctionDefinitionAttributes(D, Fn);
+ SetLLVMFunctionAttributesForDefinition(D, Fn);
+}
+
+llvm::GlobalValue *
+CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType Type) {
+ GlobalDecl GD(D, Type);
+
+ llvm::StringRef Name = getMangledName(GD);
+ if (llvm::GlobalValue *V = GetGlobalValue(Name))
+ return V;
+
+ const llvm::FunctionType *FTy =
+ getTypes().GetFunctionType(getTypes().getFunctionInfo(D, Type), false);
+
+ return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FTy, GD));
+}
+
+static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VTableIndex,
+ llvm::Value *This, const llvm::Type *Ty) {
+ Ty = Ty->getPointerTo()->getPointerTo()->getPointerTo();
+
+ llvm::Value *VTable = CGF.Builder.CreateBitCast(This, Ty);
+ VTable = CGF.Builder.CreateLoad(VTable);
+
+ llvm::Value *VFuncPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
+ return CGF.Builder.CreateLoad(VFuncPtr);
+}
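The static helper above is the classic two-load virtual dispatch; a conceptual equivalent in plain C++, with names that are illustrative only and not Clang API:

    typedef void (*VFn)();

    // First load: the vtable pointer stored at the start of the object.
    // Second load: the function pointer in the requested slot, reached by the
    // inbounds GEP in the IR built above.
    static VFn loadVirtualSlot(void *object, unsigned long slot) {
      VFn *vtable = *(VFn **)object;
      return vtable[slot];
    }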
+
+llvm::Value *
+CodeGenFunction::BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
+ const llvm::Type *Ty) {
+ MD = MD->getCanonicalDecl();
+ uint64_t VTableIndex = CGM.getVTables().getMethodVTableIndex(MD);
+
+ return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
+}
+
+llvm::Value *
+CodeGenFunction::BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
+ llvm::Value *&This, const llvm::Type *Ty) {
+ DD = cast<CXXDestructorDecl>(DD->getCanonicalDecl());
+ uint64_t VTableIndex =
+ CGM.getVTables().getMethodVTableIndex(GlobalDecl(DD, Type));
+
+ return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
+}
+
+/// Implementation for CGCXXABI. Possibly this should be moved into
+/// the incomplete ABI implementations?
+
+CGCXXABI::~CGCXXABI() {}
+
+static void ErrorUnsupportedABI(CodeGenFunction &CGF,
+ llvm::StringRef S) {
+ Diagnostic &Diags = CGF.CGM.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
+ "cannot yet compile %1 in this ABI");
+ Diags.Report(CGF.getContext().getFullLoc(CGF.CurCodeDecl->getLocation()),
+ DiagID)
+ << S;
+}
+
+static llvm::Constant *GetBogusMemberPointer(CodeGenModule &CGM,
+ QualType T) {
+ return llvm::Constant::getNullValue(CGM.getTypes().ConvertType(T));
+}
+
+const llvm::Type *
+CGCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
+ return CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+}
+
+llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+ llvm::Value *&This,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ ErrorUnsupportedABI(CGF, "calls through member pointers");
+
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->getAs<FunctionProtoType>();
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
+ const llvm::FunctionType *FTy =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
+ FPT->isVariadic());
+ return llvm::Constant::getNullValue(FTy->getPointerTo());
+}
+
+llvm::Value *CGCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
+ llvm::Value *Base,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ ErrorUnsupportedABI(CGF, "loads of member pointers");
+ const llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())->getPointerTo();
+ return llvm::Constant::getNullValue(Ty);
+}
+
+llvm::Value *CGCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *Src) {
+ ErrorUnsupportedABI(CGF, "member function pointer conversions");
+ return GetBogusMemberPointer(CGM, E->getType());
+}
+
+llvm::Value *
+CGCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality) {
+ ErrorUnsupportedABI(CGF, "member function pointer comparison");
+ return CGF.Builder.getFalse();
+}
+
+llvm::Value *
+CGCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ ErrorUnsupportedABI(CGF, "member function pointer null testing");
+ return CGF.Builder.getFalse();
+}
+
+llvm::Constant *
+CGCXXABI::EmitMemberPointerConversion(llvm::Constant *C, const CastExpr *E) {
+ return GetBogusMemberPointer(CGM, E->getType());
+}
+
+llvm::Constant *
+CGCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
+ return GetBogusMemberPointer(CGM, QualType(MPT, 0));
+}
+
+llvm::Constant *CGCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
+ return GetBogusMemberPointer(CGM,
+ CGM.getContext().getMemberPointerType(MD->getType(),
+ MD->getParent()->getTypeForDecl()));
+}
+
+llvm::Constant *CGCXXABI::EmitMemberPointer(const FieldDecl *FD) {
+ return GetBogusMemberPointer(CGM,
+ CGM.getContext().getMemberPointerType(FD->getType(),
+ FD->getParent()->getTypeForDecl()));
+}
+
+bool CGCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
+ // Fake answer.
+ return true;
+}
+
+void CGCXXABI::BuildThisParam(CodeGenFunction &CGF, FunctionArgList &Params) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
+
+ // FIXME: I'm not entirely sure I like using a fake decl just for code
+ // generation. Maybe we can come up with a better way?
+ ImplicitParamDecl *ThisDecl
+ = ImplicitParamDecl::Create(CGM.getContext(), 0, MD->getLocation(),
+ &CGM.getContext().Idents.get("this"),
+ MD->getThisType(CGM.getContext()));
+ Params.push_back(std::make_pair(ThisDecl, ThisDecl->getType()));
+ getThisDecl(CGF) = ThisDecl;
+}
+
+void CGCXXABI::EmitThisParam(CodeGenFunction &CGF) {
+ /// Initialize the 'this' slot.
+ assert(getThisDecl(CGF) && "no 'this' variable for function");
+ getThisValue(CGF)
+ = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getThisDecl(CGF)),
+ "this");
+}
+
+void CGCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
+ RValue RV, QualType ResultType) {
+ CGF.EmitReturnOfRValue(RV, ResultType);
+}
+
+CharUnits CGCXXABI::GetArrayCookieSize(QualType ElementType) {
+ return CharUnits::Zero();
+}
+
+llvm::Value *CGCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ QualType ElementType) {
+ // Should never be called.
+ ErrorUnsupportedABI(CGF, "array cookie initialization");
+ return 0;
+}
+
+void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ QualType ElementType, llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize) {
+ ErrorUnsupportedABI(CGF, "array cookie reading");
+
+ // This should be enough to avoid assertions.
+ NumElements = 0;
+ AllocPtr = llvm::Constant::getNullValue(CGF.Builder.getInt8PtrTy());
+ CookieSize = CharUnits::Zero();
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.h
new file mode 100644
index 0000000..1e6adb0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.h
@@ -0,0 +1,36 @@
+//===----- CGCXX.h - C++ related code CodeGen declarations ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These enums name the C++ constructor and destructor variants that code
+// generation needs to distinguish.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGCXX_H
+#define CLANG_CODEGEN_CGCXX_H
+
+namespace clang {
+
+/// CXXCtorType - C++ constructor types
+enum CXXCtorType {
+ Ctor_Complete, // Complete object ctor
+ Ctor_Base, // Base object ctor
+ Ctor_CompleteAllocating // Complete object allocating ctor
+};
+
+/// CXXDtorType - C++ destructor types
+enum CXXDtorType {
+ Dtor_Deleting, // Deleting dtor
+ Dtor_Complete, // Complete object dtor
+ Dtor_Base // Base object dtor
+};
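+
+// As a rough illustration (assuming the Itanium C++ ABI; other ABIs differ),
+// these variants surface in mangled names as follows:
+//
+//   struct S { S(); virtual ~S(); };
+//   S::S()  -> _ZN1SC1Ev (Ctor_Complete) and _ZN1SC2Ev (Ctor_Base)
+//   S::~S() -> _ZN1SD1Ev (Dtor_Complete), _ZN1SD2Ev (Dtor_Base) and
+//              _ZN1SD0Ev (Dtor_Deleting, emitted for virtual destructors)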
+
+} // end namespace clang
+
+#endif // CLANG_CODEGEN_CGCXX_H
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
new file mode 100644
index 0000000..367e345
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
@@ -0,0 +1,230 @@
+//===----- CGCXXABI.h - Interface to C++ ABIs -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for C++ code generation. Concrete subclasses
+// of this implement code generation for specific C++ ABIs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CXXABI_H
+#define CLANG_CODEGEN_CXXABI_H
+
+#include "CodeGenFunction.h"
+
+namespace llvm {
+ class Constant;
+ class Type;
+ class Value;
+
+ template <class T> class SmallVectorImpl;
+}
+
+namespace clang {
+ class CastExpr;
+ class CXXConstructorDecl;
+ class CXXDestructorDecl;
+ class CXXMethodDecl;
+ class CXXRecordDecl;
+ class FieldDecl;
+
+namespace CodeGen {
+ class CodeGenFunction;
+ class CodeGenModule;
+ class MangleContext;
+
+/// Implements C++ ABI-specific code generation functions.
+class CGCXXABI {
+protected:
+ CodeGenModule &CGM;
+
+ CGCXXABI(CodeGenModule &CGM) : CGM(CGM) {}
+
+protected:
+ ImplicitParamDecl *&getThisDecl(CodeGenFunction &CGF) {
+ return CGF.CXXThisDecl;
+ }
+ llvm::Value *&getThisValue(CodeGenFunction &CGF) {
+ return CGF.CXXThisValue;
+ }
+
+ ImplicitParamDecl *&getVTTDecl(CodeGenFunction &CGF) {
+ return CGF.CXXVTTDecl;
+ }
+ llvm::Value *&getVTTValue(CodeGenFunction &CGF) {
+ return CGF.CXXVTTValue;
+ }
+
+ /// Build a parameter variable suitable for 'this'.
+ void BuildThisParam(CodeGenFunction &CGF, FunctionArgList &Params);
+
+ /// Perform prolog initialization of the parameter variable suitable
+ /// for 'this' emitted by BuildThisParam.
+ void EmitThisParam(CodeGenFunction &CGF);
+
+ ASTContext &getContext() const { return CGM.getContext(); }
+
+public:
+
+ virtual ~CGCXXABI();
+
+ /// Gets the mangle context.
+ virtual MangleContext &getMangleContext() = 0;
+
+ /// Find the LLVM type used to represent the given member pointer
+ /// type.
+ virtual const llvm::Type *
+ ConvertMemberPointerType(const MemberPointerType *MPT);
+
+ /// Load a member function from an object and a member function
+ /// pointer. Apply the this-adjustment and set 'This' to the
+ /// adjusted value.
+ virtual llvm::Value *
+ EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+ llvm::Value *&This,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
+
+ /// Calculate an l-value from an object and a data member pointer.
+ virtual llvm::Value *EmitMemberDataPointerAddress(CodeGenFunction &CGF,
+ llvm::Value *Base,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
+
+ /// Perform a derived-to-base or base-to-derived member pointer
+ /// conversion.
+ virtual llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *Src);
+
+ /// Perform a derived-to-base or base-to-derived member pointer
+ /// conversion on a constant member pointer.
+ virtual llvm::Constant *EmitMemberPointerConversion(llvm::Constant *C,
+ const CastExpr *E);
+
+ /// Return true if the given member pointer can be zero-initialized
+ /// (in the C++ sense) with an LLVM zeroinitializer.
+ virtual bool isZeroInitializable(const MemberPointerType *MPT);
+
+ /// Create a null member pointer of the given type.
+ virtual llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);
+
+ /// Create a member pointer for the given method.
+ virtual llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD);
+
+ /// Create a member pointer for the given field.
+ virtual llvm::Constant *EmitMemberPointer(const FieldDecl *FD);
+
+ /// Emit a comparison between two member pointers. Returns an i1.
+ virtual llvm::Value *
+ EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality);
+
+ /// Determine if a member pointer is non-null. Returns an i1.
+ virtual llvm::Value *
+ EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
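+
+ // For orientation, a sketch of the representation these hooks typically
+ // lower to under the Itanium ABI (platform variants such as ARM differ in
+ // the details):
+ //
+ //   struct MemFnPtr { ptrdiff_t ptr; ptrdiff_t adj; };
+ //   // non-virtual: 'ptr' holds the function address
+ //   // virtual:     'ptr' holds 1 + the vtable offset of the entry
+ //   // null:        'ptr' is zero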
+
+ /// Build the signature of the given constructor variant by adding
+ /// any required parameters. For convenience, ResTy has been
+ /// initialized to 'void', and ArgTys has been initialized with the
+ /// type of 'this' (although this may be changed by the ABI) and
+ /// will have the formal parameters added to it afterwards.
+ ///
+ /// If there are ever any ABIs where the implicit parameters are
+ /// intermixed with the formal parameters, we can address those
+ /// then.
+ virtual void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType T,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys) = 0;
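+
+ // As an example only: an Itanium-style subclass might append a VTT
+ // parameter for base-object variants of classes with virtual bases, while
+ // a Microsoft-style subclass might append an implicit "is most derived"
+ // flag; the concrete subclasses behind this interface define the real rules.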
+
+ /// Build the signature of the given destructor variant by adding
+ /// any required parameters. For convenience, ResTy has been
+ /// initialized to 'void' and ArgTys has been initialized with the
+ /// type of 'this' (although this may be changed by the ABI).
+ virtual void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
+ CXXDtorType T,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys) = 0;
+
+ /// Build the ABI-specific portion of the parameter list for a
+ /// function. This generally involves a 'this' parameter and
+ /// possibly some extra data for constructors and destructors.
+ ///
+ /// ABIs may also choose to override the return type, which has been
+ /// initialized with the formal return type of the function.
+ virtual void BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params) = 0;
+
+ /// Emit the ABI-specific prolog for the function.
+ virtual void EmitInstanceFunctionProlog(CodeGenFunction &CGF) = 0;
+
+ virtual void EmitReturnFromThunk(CodeGenFunction &CGF,
+ RValue RV, QualType ResultType);
+
+ /**************************** Array cookies ******************************/
+
+ /// Returns the extra size required in order to store the array
+ /// cookie for the given type. May return 0 to indicate that no
+ /// array cookie is required.
+ ///
+ /// Several cases are filtered out before this method is called:
+ /// - non-array allocations never need a cookie
+ /// - calls to ::operator new(size_t, void*) never need a cookie
+ ///
+ /// \param ElementType - the allocated type of the expression,
+ /// i.e. the pointee type of the expression result type
+ virtual CharUnits GetArrayCookieSize(QualType ElementType);
+
+ /// Initialize the array cookie for the given allocation.
+ ///
+ /// \param NewPtr - a char* which is the presumed-non-null
+ /// return value of the allocation function
+ /// \param NumElements - the computed number of elements,
+ /// potentially collapsed from the multidimensional array case
+ /// \param ElementType - the base element allocated type,
+ /// i.e. the allocated type after stripping all array types
+ virtual llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ QualType ElementType);
+
+ /// Reads the array cookie associated with the given pointer,
+ /// if it has one.
+ ///
+ /// \param Ptr - a pointer to the first element in the array
+ /// \param ElementType - the base element type of elements of the array
+ /// \param NumElements - an out parameter which will be initialized
+ /// with the number of elements allocated, or zero if there is no
+ /// cookie
+ /// \param AllocPtr - an out parameter which will be initialized
+ /// with a char* pointing to the address returned by the allocation
+ /// function
+ /// \param CookieSize - an out parameter which will be initialized
+ /// with the size of the cookie, or zero if there is no cookie
+ virtual void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ QualType ElementType, llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize);
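+
+ // A rough picture of the Itanium-style cookie these hooks manage (the ARM
+ // and Microsoft variants lay it out differently): for 'new T[n]' where a
+ // cookie is required,
+ //
+ //   AllocPtr -> | size_t n, padded so the elements are suitably aligned |
+ //   NewPtr   -> | n elements of T                                       |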
+
+};
+
+/// Creates an instance of a C++ ABI class.
+CGCXXABI *CreateARMCXXABI(CodeGenModule &CGM);
+CGCXXABI *CreateItaniumCXXABI(CodeGenModule &CGM);
+CGCXXABI *CreateMicrosoftCXXABI(CodeGenModule &CGM);
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
new file mode 100644
index 0000000..475dfa5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
@@ -0,0 +1,1397 @@
+//===---- CGCall.cpp - Encapsulate calling convention details ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliancy.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCall.h"
+#include "CGCXXABI.h"
+#include "ABIInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/Attributes.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+/***/
+
+static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
+ switch (CC) {
+ default: return llvm::CallingConv::C;
+ case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
+ case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
+ case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
+ // TODO: add support for CC_X86Pascal to llvm
+ }
+}
+
+/// Derives the 'this' type for codegen purposes, i.e. ignoring method
+/// qualification.
+/// FIXME: address space qualification?
+static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
+ QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
+ return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
+}
+
+/// Returns the canonical formal type of the given C++ method.
+static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
+ return MD->getType()->getCanonicalTypeUnqualified()
+ .getAs<FunctionProtoType>();
+}
+
+/// Returns the "extra-canonicalized" return type, which discards
+/// qualifiers on the return type. Codegen doesn't care about them,
+/// and it makes ABI code a little easier to be able to assume that
+/// all parameter and return types are top-level unqualified.
+static CanQualType GetReturnType(QualType RetTy) {
+ return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
+}
+
+const CGFunctionInfo &
+CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP,
+ bool IsRecursive) {
+ return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
+ llvm::SmallVector<CanQualType, 16>(),
+ FTNP->getExtInfo(), IsRecursive);
+}
+
+/// \param Args - contains any initial parameters besides those
+/// in the formal type
+static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys,
+ CanQual<FunctionProtoType> FTP,
+ bool IsRecursive = false) {
+ // FIXME: Kill copy.
+ for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+ ArgTys.push_back(FTP->getArgType(i));
+ CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
+ return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo(), IsRecursive);
+}
+
+const CGFunctionInfo &
+CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP,
+ bool IsRecursive) {
+ llvm::SmallVector<CanQualType, 16> ArgTys;
+ return ::getFunctionInfo(*this, ArgTys, FTP, IsRecursive);
+}
+
+static CallingConv getCallingConventionForDecl(const Decl *D) {
+ // Set the appropriate calling convention for the Function.
+ if (D->hasAttr<StdCallAttr>())
+ return CC_X86StdCall;
+
+ if (D->hasAttr<FastCallAttr>())
+ return CC_X86FastCall;
+
+ if (D->hasAttr<ThisCallAttr>())
+ return CC_X86ThisCall;
+
+ if (D->hasAttr<PascalAttr>())
+ return CC_X86Pascal;
+
+ return CC_C;
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
+ const FunctionProtoType *FTP) {
+ llvm::SmallVector<CanQualType, 16> ArgTys;
+
+ // Add the 'this' pointer.
+ ArgTys.push_back(GetThisType(Context, RD));
+
+ return ::getFunctionInfo(*this, ArgTys,
+ FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
+ llvm::SmallVector<CanQualType, 16> ArgTys;
+
+ assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
+ assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
+
+ // Add the 'this' pointer unless this is a static method.
+ if (MD->isInstance())
+ ArgTys.push_back(GetThisType(Context, MD->getParent()));
+
+ return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
+ CXXCtorType Type) {
+ llvm::SmallVector<CanQualType, 16> ArgTys;
+ ArgTys.push_back(GetThisType(Context, D->getParent()));
+ CanQualType ResTy = Context.VoidTy;
+
+ TheCXXABI.BuildConstructorSignature(D, Type, ResTy, ArgTys);
+
+ CanQual<FunctionProtoType> FTP = GetFormalType(D);
+
+ // Add the formal parameters.
+ for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+ ArgTys.push_back(FTP->getArgType(i));
+
+ return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
+ CXXDtorType Type) {
+ llvm::SmallVector<CanQualType, 2> ArgTys;
+ ArgTys.push_back(GetThisType(Context, D->getParent()));
+ CanQualType ResTy = Context.VoidTy;
+
+ TheCXXABI.BuildDestructorSignature(D, Type, ResTy, ArgTys);
+
+ CanQual<FunctionProtoType> FTP = GetFormalType(D);
+ assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
+
+ return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
+ if (MD->isInstance())
+ return getFunctionInfo(MD);
+
+ CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
+ assert(isa<FunctionType>(FTy));
+ if (isa<FunctionNoProtoType>(FTy))
+ return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
+ assert(isa<FunctionProtoType>(FTy));
+ return getFunctionInfo(FTy.getAs<FunctionProtoType>());
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
+ llvm::SmallVector<CanQualType, 16> ArgTys;
+ ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
+ ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
+ // FIXME: Kill copy?
+ for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
+ e = MD->param_end(); i != e; ++i) {
+ ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
+ }
+ return getFunctionInfo(GetReturnType(MD->getResultType()),
+ ArgTys,
+ FunctionType::ExtInfo(
+ /*NoReturn*/ false,
+ /*RegParm*/ 0,
+ getCallingConventionForDecl(MD)));
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
+ // FIXME: Do we need to handle ObjCMethodDecl?
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
+ return getFunctionInfo(CD, GD.getCtorType());
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
+ return getFunctionInfo(DD, GD.getDtorType());
+
+ return getFunctionInfo(FD);
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
+ const CallArgList &Args,
+ const FunctionType::ExtInfo &Info) {
+ // FIXME: Kill copy.
+ llvm::SmallVector<CanQualType, 16> ArgTys;
+ for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i)
+ ArgTys.push_back(Context.getCanonicalParamType(i->second));
+ return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
+ const FunctionArgList &Args,
+ const FunctionType::ExtInfo &Info) {
+ // FIXME: Kill copy.
+ llvm::SmallVector<CanQualType, 16> ArgTys;
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i)
+ ArgTys.push_back(Context.getCanonicalParamType(i->second));
+ return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
+ const llvm::SmallVectorImpl<CanQualType> &ArgTys,
+ const FunctionType::ExtInfo &Info,
+ bool IsRecursive) {
+#ifndef NDEBUG
+ for (llvm::SmallVectorImpl<CanQualType>::const_iterator
+ I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
+ assert(I->isCanonicalAsParam());
+#endif
+
+ unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());
+
+ // Lookup or create unique function info.
+ llvm::FoldingSetNodeID ID;
+ CGFunctionInfo::Profile(ID, Info, ResTy,
+ ArgTys.begin(), ArgTys.end());
+
+ void *InsertPos = 0;
+ CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
+ if (FI)
+ return *FI;
+
+ // Construct the function info.
+ FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(), ResTy,
+ ArgTys.data(), ArgTys.size());
+ FunctionInfos.InsertNode(FI, InsertPos);
+
+ // Compute ABI information.
+ getABIInfo().computeInfo(*FI);
+
+ // Loop over all of the computed argument and return value info. If any of
+ // them are direct or extend without a specified coerce type, specify the
+ // default now.
+ ABIArgInfo &RetInfo = FI->getReturnInfo();
+ if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
+ RetInfo.setCoerceToType(ConvertTypeRecursive(FI->getReturnType()));
+
+ for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
+ I != E; ++I)
+ if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
+ I->info.setCoerceToType(ConvertTypeRecursive(I->type));
+
+ // If this is a top-level call and ConvertTypeRecursive hit unresolved pointer
+ // types, resolve them now. These pointers may point to this function, which
+ // we *just* filled in the FunctionInfo for.
+ if (!IsRecursive && !PointersToResolve.empty())
+ HandleLateResolvedPointers();
+
+ return *FI;
+}
+
+CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
+ bool _NoReturn, unsigned _RegParm,
+ CanQualType ResTy,
+ const CanQualType *ArgTys,
+ unsigned NumArgTys)
+ : CallingConvention(_CallingConvention),
+ EffectiveCallingConvention(_CallingConvention),
+ NoReturn(_NoReturn), RegParm(_RegParm)
+{
+ NumArgs = NumArgTys;
+
+ // FIXME: Co-allocate with the CGFunctionInfo object.
+ Args = new ArgInfo[1 + NumArgTys];
+ Args[0].type = ResTy;
+ for (unsigned i = 0; i != NumArgTys; ++i)
+ Args[1 + i].type = ArgTys[i];
+}
+
+/***/
+
+void CodeGenTypes::GetExpandedTypes(QualType Ty,
+ std::vector<const llvm::Type*> &ArgTys,
+ bool IsRecursive) {
+ const RecordType *RT = Ty->getAsStructureType();
+ assert(RT && "Can only expand structure types.");
+ const RecordDecl *RD = RT->getDecl();
+ assert(!RD->hasFlexibleArrayMember() &&
+ "Cannot expand structure with flexible array.");
+
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+ assert(!FD->isBitField() &&
+ "Cannot expand structure with bit-field members.");
+
+ QualType FT = FD->getType();
+ if (CodeGenFunction::hasAggregateLLVMType(FT))
+ GetExpandedTypes(FT, ArgTys, IsRecursive);
+ else
+ ArgTys.push_back(ConvertType(FT, IsRecursive));
+ }
+}
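+
+// For instance (a sketch, not a real call site): expanding
+//   struct Pt { int x; struct { float a, b; } f; };
+// appends i32, float, float to ArgTys, i.e. the struct is passed as three
+// separate LLVM arguments when its ABIArgInfo kind is Expand.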
+
+llvm::Function::arg_iterator
+CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
+ llvm::Function::arg_iterator AI) {
+ const RecordType *RT = Ty->getAsStructureType();
+ assert(RT && "Can only expand structure types.");
+
+ RecordDecl *RD = RT->getDecl();
+ assert(LV.isSimple() &&
+ "Unexpected non-simple lvalue during struct expansion.");
+ llvm::Value *Addr = LV.getAddress();
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ FieldDecl *FD = *i;
+ QualType FT = FD->getType();
+
+ // FIXME: What are the right qualifiers here?
+ LValue LV = EmitLValueForField(Addr, FD, 0);
+ if (CodeGenFunction::hasAggregateLLVMType(FT)) {
+ AI = ExpandTypeFromArgs(FT, LV, AI);
+ } else {
+ EmitStoreThroughLValue(RValue::get(AI), LV, FT);
+ ++AI;
+ }
+ }
+
+ return AI;
+}
+
+void
+CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
+ llvm::SmallVector<llvm::Value*, 16> &Args) {
+ const RecordType *RT = Ty->getAsStructureType();
+ assert(RT && "Can only expand structure types.");
+
+ RecordDecl *RD = RT->getDecl();
+ assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
+ llvm::Value *Addr = RV.getAggregateAddr();
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ FieldDecl *FD = *i;
+ QualType FT = FD->getType();
+
+ // FIXME: What are the right qualifiers here?
+ LValue LV = EmitLValueForField(Addr, FD, 0);
+ if (CodeGenFunction::hasAggregateLLVMType(FT)) {
+ ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
+ } else {
+ RValue RV = EmitLoadOfLValue(LV, FT);
+ assert(RV.isScalar() &&
+ "Unexpected non-scalar rvalue during struct expansion.");
+ Args.push_back(RV.getScalarVal());
+ }
+ }
+}
+
+/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
+/// accessing some number of bytes out of it, try to gep into the struct to get
+/// at its inner goodness. Dive as deep as possible without entering an element
+/// with an in-memory size smaller than DstSize.
+static llvm::Value *
+EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
+ const llvm::StructType *SrcSTy,
+ uint64_t DstSize, CodeGenFunction &CGF) {
+ // We can't dive into a zero-element struct.
+ if (SrcSTy->getNumElements() == 0) return SrcPtr;
+
+ const llvm::Type *FirstElt = SrcSTy->getElementType(0);
+
+ // If the first elt is at least as large as what we're looking for, or if the
+ // first element is the same size as the whole struct, we can enter it.
+ uint64_t FirstEltSize =
+ CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
+ if (FirstEltSize < DstSize &&
+ FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
+ return SrcPtr;
+
+ // GEP into the first element.
+ SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
+
+ // If the first element is a struct, recurse.
+ const llvm::Type *SrcTy =
+ cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
+ return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
+
+ return SrcPtr;
+}
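+
+// Example (illustrative): given a SrcPtr of type {{i32, i32}, i8}* and a
+// DstSize of 8, this dives to the inner {i32, i32}; with a DstSize of 4 it
+// would continue down to the first i32.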
+
+/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
+/// are either integers or pointers. This does a truncation of the value if it
+/// is too large or a zero extension if it is too small.
+static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
+ const llvm::Type *Ty,
+ CodeGenFunction &CGF) {
+ if (Val->getType() == Ty)
+ return Val;
+
+ if (isa<llvm::PointerType>(Val->getType())) {
+ // If this is Pointer->Pointer avoid conversion to and from int.
+ if (isa<llvm::PointerType>(Ty))
+ return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
+
+ // Convert the pointer to an integer so we can play with its width.
+ Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
+ }
+
+ const llvm::Type *DestIntTy = Ty;
+ if (isa<llvm::PointerType>(DestIntTy))
+ DestIntTy = CGF.IntPtrTy;
+
+ if (Val->getType() != DestIntTy)
+ Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
+
+ if (isa<llvm::PointerType>(Ty))
+ Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
+ return Val;
+}
+
+
+
+/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
+/// a pointer to an object of type \arg Ty.
+///
+/// This safely handles the case when the src type is smaller than the
+/// destination type; in this situation the values of bits which are not
+/// present in the src are undefined.
+static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
+ const llvm::Type *Ty,
+ CodeGenFunction &CGF) {
+ const llvm::Type *SrcTy =
+ cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+
+ // If SrcTy and Ty are the same, just do a load.
+ if (SrcTy == Ty)
+ return CGF.Builder.CreateLoad(SrcPtr);
+
+ uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
+
+ if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
+ SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
+ SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ }
+
+ uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+
+ // If the source and destination are integer or pointer types, just do an
+ // extension or truncation to the desired type.
+ if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
+ (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
+ return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
+ }
+
+ // If load is legal, just bitcast the src pointer.
+ if (SrcSize >= DstSize) {
+ // Generally SrcSize is never greater than DstSize, since this means we are
+ // losing bits. However, this can happen in cases where the structure has
+ // additional padding, for example due to a user specified alignment.
+ //
+ // FIXME: Assert that we aren't truncating non-padding bits when we have access
+ // to that information.
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
+ // FIXME: Use better alignment / avoid requiring aligned load.
+ Load->setAlignment(1);
+ return Load;
+ }
+
+ // Otherwise do coercion through memory. This is stupid, but
+ // simple.
+ llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
+ llvm::StoreInst *Store =
+ CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
+ // FIXME: Use better alignment / avoid requiring aligned store.
+ Store->setAlignment(1);
+ return CGF.Builder.CreateLoad(Tmp);
+}
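+
+// Example (illustrative): loading an i64 through a {i32, i32}* bitcasts the
+// pointer and loads all eight bytes at once, while loading an i64 through an
+// i32* loads the i32 and zero-extends it via CoerceIntOrPtrToIntOrPtr.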
+
+/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
+/// where the source and destination may have different types.
+///
+/// This safely handles the case when the src type is larger than the
+/// destination type; the upper bits of the src will be lost.
+static void CreateCoercedStore(llvm::Value *Src,
+ llvm::Value *DstPtr,
+ bool DstIsVolatile,
+ CodeGenFunction &CGF) {
+ const llvm::Type *SrcTy = Src->getType();
+ const llvm::Type *DstTy =
+ cast<llvm::PointerType>(DstPtr->getType())->getElementType();
+ if (SrcTy == DstTy) {
+ CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
+ return;
+ }
+
+ uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+
+ if (const llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
+ DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
+ DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
+ }
+
+ // If the source and destination are integer or pointer types, just do an
+ // extension or truncation to the desired type.
+ if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
+ (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
+ Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
+ CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
+ return;
+ }
+
+ uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
+
+ // If store is legal, just bitcast the src pointer.
+ if (SrcSize <= DstSize) {
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
+ // FIXME: Use better alignment / avoid requiring aligned store.
+ CGF.Builder.CreateStore(Src, Casted, DstIsVolatile)->setAlignment(1);
+ } else {
+ // Otherwise do coercion through memory. This is stupid, but
+ // simple.
+
+ // Generally SrcSize is never greater than DstSize, since this means we are
+ // losing bits. However, this can happen in cases where the structure has
+ // additional padding, for example due to a user specified alignment.
+ //
+ // FIXME: Assert that we aren't truncating non-padding bits when we have access
+ // to that information.
+ llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
+ CGF.Builder.CreateStore(Src, Tmp);
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
+ // FIXME: Use better alignment / avoid requiring aligned load.
+ Load->setAlignment(1);
+ CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
+ }
+}
+
+/***/
+
+bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
+ return FI.getReturnInfo().isIndirect();
+}
+
+bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
+ if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
+ default:
+ return false;
+ case BuiltinType::Float:
+ return getContext().Target.useObjCFPRetForRealType(TargetInfo::Float);
+ case BuiltinType::Double:
+ return getContext().Target.useObjCFPRetForRealType(TargetInfo::Double);
+ case BuiltinType::LongDouble:
+ return getContext().Target.useObjCFPRetForRealType(
+ TargetInfo::LongDouble);
+ }
+ }
+
+ return false;
+}
+
+const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
+ const CGFunctionInfo &FI = getFunctionInfo(GD);
+
+ // For definition purposes, don't consider a K&R function variadic.
+ bool Variadic = false;
+ if (const FunctionProtoType *FPT =
+ cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
+ Variadic = FPT->isVariadic();
+
+ return GetFunctionType(FI, Variadic, false);
+}
+
+const llvm::FunctionType *
+CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
+ bool IsRecursive) {
+ std::vector<const llvm::Type*> ArgTys;
+
+ const llvm::Type *ResultType = 0;
+
+ QualType RetTy = FI.getReturnType();
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Expand:
+ assert(0 && "Invalid ABI kind for return argument");
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct:
+ ResultType = RetAI.getCoerceToType();
+ break;
+
+ case ABIArgInfo::Indirect: {
+ assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
+ ResultType = llvm::Type::getVoidTy(getLLVMContext());
+ const llvm::Type *STy = ConvertType(RetTy, IsRecursive);
+ ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
+ break;
+ }
+
+ case ABIArgInfo::Ignore:
+ ResultType = llvm::Type::getVoidTy(getLLVMContext());
+ break;
+ }
+
+ for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+ ie = FI.arg_end(); it != ie; ++it) {
+ const ABIArgInfo &AI = it->info;
+
+ switch (AI.getKind()) {
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Indirect: {
+ // indirect arguments are always on the stack, which is addr space #0.
+ const llvm::Type *LTy = ConvertTypeForMem(it->type, IsRecursive);
+ ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
+ break;
+ }
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ // If the coerce-to type is a first class aggregate, flatten it. Either
+ // way is semantically identical, but fast-isel and the optimizer
+ // generally like scalar values better than FCAs.
+ const llvm::Type *ArgTy = AI.getCoerceToType();
+ if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgTy)) {
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
+ ArgTys.push_back(STy->getElementType(i));
+ } else {
+ ArgTys.push_back(ArgTy);
+ }
+ break;
+ }
+
+ case ABIArgInfo::Expand:
+ GetExpandedTypes(it->type, ArgTys, IsRecursive);
+ break;
+ }
+ }
+
+ return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
+}
+
+const llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+
+ if (!VerifyFuncTypeComplete(FPT)) {
+ const CGFunctionInfo *Info;
+ if (isa<CXXDestructorDecl>(MD))
+ Info = &getFunctionInfo(cast<CXXDestructorDecl>(MD), GD.getDtorType());
+ else
+ Info = &getFunctionInfo(MD);
+ return GetFunctionType(*Info, FPT->isVariadic(), false);
+ }
+
+ return llvm::OpaqueType::get(getLLVMContext());
+}
+
+void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
+ const Decl *TargetDecl,
+ AttributeListType &PAL,
+ unsigned &CallingConv) {
+ unsigned FuncAttrs = 0;
+ unsigned RetAttrs = 0;
+
+ CallingConv = FI.getEffectiveCallingConvention();
+
+ if (FI.isNoReturn())
+ FuncAttrs |= llvm::Attribute::NoReturn;
+
+ // FIXME: handle sseregparm someday...
+ if (TargetDecl) {
+ if (TargetDecl->hasAttr<NoThrowAttr>())
+ FuncAttrs |= llvm::Attribute::NoUnwind;
+ else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
+ const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
+ if (FPT && FPT->hasEmptyExceptionSpec())
+ FuncAttrs |= llvm::Attribute::NoUnwind;
+ }
+
+ if (TargetDecl->hasAttr<NoReturnAttr>())
+ FuncAttrs |= llvm::Attribute::NoReturn;
+ if (TargetDecl->hasAttr<ConstAttr>())
+ FuncAttrs |= llvm::Attribute::ReadNone;
+ else if (TargetDecl->hasAttr<PureAttr>())
+ FuncAttrs |= llvm::Attribute::ReadOnly;
+ if (TargetDecl->hasAttr<MallocAttr>())
+ RetAttrs |= llvm::Attribute::NoAlias;
+ }
+
+ if (CodeGenOpts.OptimizeSize)
+ FuncAttrs |= llvm::Attribute::OptimizeForSize;
+ if (CodeGenOpts.DisableRedZone)
+ FuncAttrs |= llvm::Attribute::NoRedZone;
+ if (CodeGenOpts.NoImplicitFloat)
+ FuncAttrs |= llvm::Attribute::NoImplicitFloat;
+
+ QualType RetTy = FI.getReturnType();
+ unsigned Index = 1;
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Extend:
+ if (RetTy->hasSignedIntegerRepresentation())
+ RetAttrs |= llvm::Attribute::SExt;
+ else if (RetTy->hasUnsignedIntegerRepresentation())
+ RetAttrs |= llvm::Attribute::ZExt;
+ break;
+ case ABIArgInfo::Direct:
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Indirect:
+ PAL.push_back(llvm::AttributeWithIndex::get(Index,
+ llvm::Attribute::StructRet));
+ ++Index;
+ // sret disables readnone and readonly
+ FuncAttrs &= ~(llvm::Attribute::ReadOnly |
+ llvm::Attribute::ReadNone);
+ break;
+
+ case ABIArgInfo::Expand:
+ assert(0 && "Invalid ABI kind for return argument");
+ }
+
+ if (RetAttrs)
+ PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
+
+ // FIXME: we need to honor command line settings also.
+ // FIXME: RegParm should be reduced in case of nested functions and/or global
+ // register variable.
+ signed RegParm = FI.getRegParm();
+
+ unsigned PointerWidth = getContext().Target.getPointerWidth(0);
+ for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+ ie = FI.arg_end(); it != ie; ++it) {
+ QualType ParamType = it->type;
+ const ABIArgInfo &AI = it->info;
+ unsigned Attributes = 0;
+
+ // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
+ // have the corresponding parameter variable. It doesn't make
+ // sense to do it here because the parameter representation is such a mess.
+ switch (AI.getKind()) {
+ case ABIArgInfo::Extend:
+ if (ParamType->isSignedIntegerType())
+ Attributes |= llvm::Attribute::SExt;
+ else if (ParamType->isUnsignedIntegerType())
+ Attributes |= llvm::Attribute::ZExt;
+ // FALL THROUGH
+ case ABIArgInfo::Direct:
+ if (RegParm > 0 &&
+ (ParamType->isIntegerType() || ParamType->isPointerType())) {
+ RegParm -=
+ (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
+ if (RegParm >= 0)
+ Attributes |= llvm::Attribute::InReg;
+ }
+ // FIXME: handle sseregparm someday...
+
+ if (const llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(AI.getCoerceToType()))
+ Index += STy->getNumElements()-1; // 1 will be added below.
+ break;
+
+ case ABIArgInfo::Indirect:
+ if (AI.getIndirectByVal())
+ Attributes |= llvm::Attribute::ByVal;
+
+ Attributes |=
+ llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
+ // byval disables readnone and readonly.
+ FuncAttrs &= ~(llvm::Attribute::ReadOnly |
+ llvm::Attribute::ReadNone);
+ break;
+
+ case ABIArgInfo::Ignore:
+ // Skip increment, no matching LLVM parameter.
+ continue;
+
+ case ABIArgInfo::Expand: {
+ std::vector<const llvm::Type*> Tys;
+ // FIXME: This is rather inefficient. Do we ever actually need to do
+ // anything here? The result should be just reconstructed on the other
+ // side, so extension should be a non-issue.
+ getTypes().GetExpandedTypes(ParamType, Tys, false);
+ Index += Tys.size();
+ continue;
+ }
+ }
+
+ if (Attributes)
+ PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
+ ++Index;
+ }
+ if (FuncAttrs)
+ PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
+}
+
+void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
+ llvm::Function *Fn,
+ const FunctionArgList &Args) {
+ // If this is an implicit-return-zero function, go ahead and
+ // initialize the return value. TODO: it might be nice to have
+ // a more general mechanism for this that didn't require synthesized
+ // return statements.
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
+ if (FD->hasImplicitReturnZero()) {
+ QualType RetTy = FD->getResultType().getUnqualifiedType();
+ const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
+ llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
+ Builder.CreateStore(Zero, ReturnValue);
+ }
+ }
+
+ // FIXME: We no longer need the types from FunctionArgList; lift up and
+ // simplify.
+
+ // Emit allocs for param decls. Give the LLVM Argument nodes names.
+ llvm::Function::arg_iterator AI = Fn->arg_begin();
+
+ // Name the struct return argument.
+ if (CGM.ReturnTypeUsesSRet(FI)) {
+ AI->setName("agg.result");
+ ++AI;
+ }
+
+ assert(FI.arg_size() == Args.size() &&
+ "Mismatch between function signature & arguments.");
+ CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i, ++info_it) {
+ const VarDecl *Arg = i->first;
+ QualType Ty = info_it->type;
+ const ABIArgInfo &ArgI = info_it->info;
+
+ switch (ArgI.getKind()) {
+ case ABIArgInfo::Indirect: {
+ llvm::Value *V = AI;
+ if (hasAggregateLLVMType(Ty)) {
+ // Do nothing, aggregates and complex variables are accessed by
+ // reference.
+ } else {
+ // Load scalar value from indirect argument.
+ unsigned Alignment = getContext().getTypeAlignInChars(Ty).getQuantity();
+ V = EmitLoadOfScalar(V, false, Alignment, Ty);
+ if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
+ // This must be a promotion, for something like
+ // "void a(x) short x; {..."
+ V = EmitScalarConversion(V, Ty, Arg->getType());
+ }
+ }
+ EmitParmDecl(*Arg, V);
+ break;
+ }
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ // If we have the trivial case, handle it with no muss and fuss.
+ if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
+ ArgI.getCoerceToType() == ConvertType(Ty) &&
+ ArgI.getDirectOffset() == 0) {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ llvm::Value *V = AI;
+
+ if (Arg->getType().isRestrictQualified())
+ AI->addAttr(llvm::Attribute::NoAlias);
+
+ if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
+ // This must be a promotion, for something like
+ // "void a(x) short x; {..."
+ V = EmitScalarConversion(V, Ty, Arg->getType());
+ }
+ EmitParmDecl(*Arg, V);
+ break;
+ }
+
+ llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");
+
+ // The alignment we need to use is the max of the requested alignment for
+ // the argument and the alignment required by our access code below.
+ unsigned AlignmentToUse =
+ CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
+ AlignmentToUse = std::max(AlignmentToUse,
+ (unsigned)getContext().getDeclAlign(Arg).getQuantity());
+
+ Alloca->setAlignment(AlignmentToUse);
+ llvm::Value *V = Alloca;
+ llvm::Value *Ptr = V; // Pointer to store into.
+
+ // If the value is offset in memory, apply the offset now.
+ if (unsigned Offs = ArgI.getDirectOffset()) {
+ Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
+ Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
+ Ptr = Builder.CreateBitCast(Ptr,
+ llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
+ }
+
+ // If the coerce-to type is a first class aggregate, we flatten it and
+ // pass the elements. Either way is semantically identical, but fast-isel
+ // and the optimizer generally like scalar values better than FCAs.
+ if (const llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
+ Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
+
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i));
+ llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
+ Builder.CreateStore(AI++, EltPtr);
+ }
+ } else {
+ // Simple case, just do a coerced store of the argument into the alloca.
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ AI->setName(Arg->getName() + ".coerce");
+ CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
+ }
+
+
+ // Match to what EmitParmDecl is expecting for this type.
+ if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
+ if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
+ // This must be a promotion, for something like
+ // "void a(x) short x; {..."
+ V = EmitScalarConversion(V, Ty, Arg->getType());
+ }
+ }
+ EmitParmDecl(*Arg, V);
+ continue; // Skip ++AI increment, already done.
+ }
+
+ case ABIArgInfo::Expand: {
+ // If this structure was expanded into multiple arguments then
+ // we need to create a temporary and reconstruct it from the
+ // arguments.
+ llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
+ llvm::Function::arg_iterator End =
+ ExpandTypeFromArgs(Ty, MakeAddrLValue(Temp, Ty), AI);
+ EmitParmDecl(*Arg, Temp);
+
+ // Name the arguments used in expansion and increment AI.
+ unsigned Index = 0;
+ for (; AI != End; ++AI, ++Index)
+ AI->setName(Arg->getName() + "." + llvm::Twine(Index));
+ continue;
+ }
+
+ case ABIArgInfo::Ignore:
+ // Initialize the local variable appropriately.
+ if (hasAggregateLLVMType(Ty))
+ EmitParmDecl(*Arg, CreateMemTemp(Ty));
+ else
+ EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
+
+ // Skip increment, no matching LLVM parameter.
+ continue;
+ }
+
+ ++AI;
+ }
+ assert(AI == Fn->arg_end() && "Argument mismatch!");
+}
+
+void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
+ // Functions with no result always return void.
+ if (ReturnValue == 0) {
+ Builder.CreateRetVoid();
+ return;
+ }
+
+ llvm::DebugLoc RetDbgLoc;
+ llvm::Value *RV = 0;
+ QualType RetTy = FI.getReturnType();
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Indirect: {
+ unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
+ if (RetTy->isAnyComplexType()) {
+ ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
+ StoreComplexToAddr(RT, CurFn->arg_begin(), false);
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ // Do nothing; aggregates get evaluated directly into the destination.
+ } else {
+ EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
+ false, Alignment, RetTy);
+ }
+ break;
+ }
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct:
+ if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
+ RetAI.getDirectOffset() == 0) {
+ // The internal return value temp always will have pointer-to-return-type
+ // type, just do a load.
+
+ // If the instruction right before the insertion point is a store to the
+ // return value, we can elide the load, zap the store, and usually zap the
+ // alloca.
+ llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
+ llvm::StoreInst *SI = 0;
+ if (InsertBB->empty() ||
+ !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
+ SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
+ RV = Builder.CreateLoad(ReturnValue);
+ } else {
+ // Get the stored value and nuke the now-dead store.
+ RetDbgLoc = SI->getDebugLoc();
+ RV = SI->getValueOperand();
+ SI->eraseFromParent();
+
+ // If that was the only use of the return value, nuke it as well now.
+ if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
+ cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
+ ReturnValue = 0;
+ }
+ }
+ } else {
+ llvm::Value *V = ReturnValue;
+ // If the value is offset in memory, apply the offset now.
+ if (unsigned Offs = RetAI.getDirectOffset()) {
+ V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
+ V = Builder.CreateConstGEP1_32(V, Offs);
+ V = Builder.CreateBitCast(V,
+ llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
+ }
+
+ RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
+ }
+ break;
+
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Expand:
+ assert(0 && "Invalid ABI kind for return argument");
+ }
+
+ llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
+ if (!RetDbgLoc.isUnknown())
+ Ret->setDebugLoc(RetDbgLoc);
+}
+
+RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
+ // StartFunction converted the ABI-lowered parameter(s) into a
+ // local alloca. We need to turn that into an r-value suitable
+ // for EmitCall.
+ llvm::Value *Local = GetAddrOfLocalVar(Param);
+
+ QualType ArgType = Param->getType();
+
+ // For the most part, we just need to load the alloca, except:
+ // 1) aggregate r-values are actually pointers to temporaries, and
+ // 2) references to aggregates are pointers directly to the aggregate.
+ // I don't know why references to non-aggregates are different here.
+ if (const ReferenceType *RefType = ArgType->getAs<ReferenceType>()) {
+ if (hasAggregateLLVMType(RefType->getPointeeType()))
+ return RValue::getAggregate(Local);
+
+ // Locals which are references to scalars are represented
+ // with allocas holding the pointer.
+ return RValue::get(Builder.CreateLoad(Local));
+ }
+
+ if (ArgType->isAnyComplexType())
+ return RValue::getComplex(LoadComplexFromAddr(Local, /*volatile*/ false));
+
+ if (hasAggregateLLVMType(ArgType))
+ return RValue::getAggregate(Local);
+
+ unsigned Alignment = getContext().getDeclAlign(Param).getQuantity();
+ return RValue::get(EmitLoadOfScalar(Local, false, Alignment, ArgType));
+}
+
+RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
+ if (ArgType->isReferenceType())
+ return EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
+
+ return EmitAnyExprToTemp(E);
+}
+
+/// Emits a call or invoke instruction to the given function, depending
+/// on the current state of the EH stack.
+llvm::CallSite
+CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
+ llvm::Value * const *ArgBegin,
+ llvm::Value * const *ArgEnd,
+ const llvm::Twine &Name) {
+ llvm::BasicBlock *InvokeDest = getInvokeDest();
+ if (!InvokeDest)
+ return Builder.CreateCall(Callee, ArgBegin, ArgEnd, Name);
+
+ llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
+ llvm::InvokeInst *Invoke = Builder.CreateInvoke(Callee, ContBB, InvokeDest,
+ ArgBegin, ArgEnd, Name);
+ EmitBlock(ContBB);
+ return Invoke;
+}
+
+RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
+ llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ const CallArgList &CallArgs,
+ const Decl *TargetDecl,
+ llvm::Instruction **callOrInvoke) {
+ // FIXME: We no longer need the types from CallArgs; lift up and simplify.
+ llvm::SmallVector<llvm::Value*, 16> Args;
+
+ // Handle struct-return functions by passing a pointer to the
+ // location that we would like to return into.
+ QualType RetTy = CallInfo.getReturnType();
+ const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
+
+
+ // If the call returns a temporary with struct return, create a temporary
+ // alloca to hold the result, unless one is given to us.
+ if (CGM.ReturnTypeUsesSRet(CallInfo)) {
+ llvm::Value *Value = ReturnValue.getValue();
+ if (!Value)
+ Value = CreateMemTemp(RetTy);
+ Args.push_back(Value);
+ }
+
+ assert(CallInfo.arg_size() == CallArgs.size() &&
+ "Mismatch between function signature & arguments.");
+ CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
+ for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
+ I != E; ++I, ++info_it) {
+ const ABIArgInfo &ArgInfo = info_it->info;
+ RValue RV = I->first;
+
+ unsigned Alignment =
+ getContext().getTypeAlignInChars(I->second).getQuantity();
+ switch (ArgInfo.getKind()) {
+ case ABIArgInfo::Indirect: {
+ if (RV.isScalar() || RV.isComplex()) {
+ // Make a temporary alloca to pass the argument.
+ Args.push_back(CreateMemTemp(I->second));
+ if (RV.isScalar())
+ EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
+ Alignment, I->second);
+ else
+ StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
+ } else {
+ Args.push_back(RV.getAggregateAddr());
+ }
+ break;
+ }
+
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
+ ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
+ ArgInfo.getDirectOffset() == 0) {
+ if (RV.isScalar())
+ Args.push_back(RV.getScalarVal());
+ else
+ Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
+ break;
+ }
+
+ // FIXME: Avoid the conversion through memory if possible.
+ llvm::Value *SrcPtr;
+ if (RV.isScalar()) {
+ SrcPtr = CreateMemTemp(I->second, "coerce");
+ EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, Alignment,
+ I->second);
+ } else if (RV.isComplex()) {
+ SrcPtr = CreateMemTemp(I->second, "coerce");
+ StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
+ } else
+ SrcPtr = RV.getAggregateAddr();
+
+ // If the value is offset in memory, apply the offset now.
+ if (unsigned Offs = ArgInfo.getDirectOffset()) {
+ SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
+ SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
+ SrcPtr = Builder.CreateBitCast(SrcPtr,
+ llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
+
+ }
+
+ // If the coerce-to type is a first class aggregate, we flatten it and
+ // pass the elements. Either way is semantically identical, but fast-isel
+ // and the optimizer generally like scalar values better than FCAs.
+ if (const llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
+ SrcPtr = Builder.CreateBitCast(SrcPtr,
+ llvm::PointerType::getUnqual(STy));
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
+ llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
+ // We don't know what we're loading from.
+ LI->setAlignment(1);
+ Args.push_back(LI);
+ }
+ } else {
+ // In the simple case, just pass the coerced loaded value.
+ Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
+ *this));
+ }
+
+ break;
+ }
+
+ case ABIArgInfo::Expand:
+ ExpandTypeToArgs(I->second, RV, Args);
+ break;
+ }
+ }
+
+ // If the callee is a bitcast of a function to a varargs pointer to function
+ // type, check to see if we can remove the bitcast. This handles some cases
+ // with unprototyped functions.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
+ if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
+ const llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
+ const llvm::FunctionType *CurFT =
+ cast<llvm::FunctionType>(CurPT->getElementType());
+ const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
+
+ if (CE->getOpcode() == llvm::Instruction::BitCast &&
+ ActualFT->getReturnType() == CurFT->getReturnType() &&
+ ActualFT->getNumParams() == CurFT->getNumParams() &&
+ ActualFT->getNumParams() == Args.size()) {
+ bool ArgsMatch = true;
+ for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
+ if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
+ ArgsMatch = false;
+ break;
+ }
+
+ // Strip the cast if we can get away with it. This is a nice cleanup,
+ // but also allows us to inline the function at -O0 if it is marked
+ // always_inline.
+ if (ArgsMatch)
+ Callee = CalleeF;
+ }
+ }
+
+
+ unsigned CallingConv;
+ CodeGen::AttributeListType AttributeList;
+ CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
+ llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
+ AttributeList.end());
+
+ llvm::BasicBlock *InvokeDest = 0;
+ if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
+ InvokeDest = getInvokeDest();
+
+ llvm::CallSite CS;
+ if (!InvokeDest) {
+ CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
+ } else {
+ llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
+ CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
+ Args.data(), Args.data()+Args.size());
+ EmitBlock(Cont);
+ }
+ if (callOrInvoke)
+ *callOrInvoke = CS.getInstruction();
+
+ CS.setAttributes(Attrs);
+ CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
+
+ // If the call doesn't return, finish the basic block and clear the
+ // insertion point; this allows the rest of IRgen to discard
+ // unreachable code.
+ if (CS.doesNotReturn()) {
+ Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+
+ // FIXME: For now, emit a dummy basic block because expr emitters in
+ // general are not ready to handle emitting expressions at unreachable
+ // points.
+ EnsureInsertPoint();
+
+ // Return a reasonable RValue.
+ return GetUndefRValue(RetTy);
+ }
+
+ llvm::Instruction *CI = CS.getInstruction();
+ if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
+ CI->setName("call");
+
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Indirect: {
+ unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
+ if (RetTy->isAnyComplexType())
+ return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
+ if (CodeGenFunction::hasAggregateLLVMType(RetTy))
+ return RValue::getAggregate(Args[0]);
+ return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
+ }
+
+ case ABIArgInfo::Ignore:
+ // If we are ignoring an argument that had a result, make sure to
+ // construct the appropriate return value for our caller.
+ return GetUndefRValue(RetTy);
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
+ RetAI.getDirectOffset() == 0) {
+ if (RetTy->isAnyComplexType()) {
+ llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
+ llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
+ return RValue::getComplex(std::make_pair(Real, Imag));
+ }
+ if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ llvm::Value *DestPtr = ReturnValue.getValue();
+ bool DestIsVolatile = ReturnValue.isVolatile();
+
+ if (!DestPtr) {
+ DestPtr = CreateMemTemp(RetTy, "agg.tmp");
+ DestIsVolatile = false;
+ }
+ Builder.CreateStore(CI, DestPtr, DestIsVolatile);
+ return RValue::getAggregate(DestPtr);
+ }
+ return RValue::get(CI);
+ }
+
+ llvm::Value *DestPtr = ReturnValue.getValue();
+ bool DestIsVolatile = ReturnValue.isVolatile();
+
+ if (!DestPtr) {
+ DestPtr = CreateMemTemp(RetTy, "coerce");
+ DestIsVolatile = false;
+ }
+
+ // If the value is offset in memory, apply the offset now.
+ llvm::Value *StorePtr = DestPtr;
+ if (unsigned Offs = RetAI.getDirectOffset()) {
+ StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
+ StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
+ StorePtr = Builder.CreateBitCast(StorePtr,
+ llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
+ }
+ CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
+
+ unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
+ if (RetTy->isAnyComplexType())
+ return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
+ if (CodeGenFunction::hasAggregateLLVMType(RetTy))
+ return RValue::getAggregate(DestPtr);
+ return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
+ }
+
+ case ABIArgInfo::Expand:
+ assert(0 && "Invalid ABI kind for return argument");
+ }
+
+ assert(0 && "Unhandled ABIArgInfo::Kind");
+ return RValue::get(0);
+}
+
+/* VarArg handling */
+
+llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
+ return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h
new file mode 100644
index 0000000..41e707a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h
@@ -0,0 +1,164 @@
+//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliancy.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGCALL_H
+#define CLANG_CODEGEN_CGCALL_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Value.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/CanonicalType.h"
+
+#include "CGValue.h"
+
+// FIXME: Restructure so we don't have to expose so much stuff.
+#include "ABIInfo.h"
+
+namespace llvm {
+ struct AttributeWithIndex;
+ class Function;
+ class Type;
+ class Value;
+
+ template<typename T, unsigned> class SmallVector;
+}
+
+namespace clang {
+ class ASTContext;
+ class Decl;
+ class FunctionDecl;
+ class ObjCMethodDecl;
+ class VarDecl;
+
+namespace CodeGen {
+ typedef llvm::SmallVector<llvm::AttributeWithIndex, 8> AttributeListType;
+
+ /// CallArgList - Type for representing both the value and type of
+ /// arguments in a call.
+ typedef llvm::SmallVector<std::pair<RValue, QualType>, 16> CallArgList;
+
+ /// FunctionArgList - Type for representing both the decl and type
+ /// of parameters to a function. The decl must be either a
+ /// ParmVarDecl or ImplicitParamDecl.
+ typedef llvm::SmallVector<std::pair<const VarDecl*, QualType>,
+ 16> FunctionArgList;
+
+ /// CGFunctionInfo - Class to encapsulate the information about a
+ /// function definition.
+ class CGFunctionInfo : public llvm::FoldingSetNode {
+ struct ArgInfo {
+ CanQualType type;
+ ABIArgInfo info;
+ };
+
+ /// The LLVM::CallingConv to use for this function (as specified by the
+ /// user).
+ unsigned CallingConvention;
+
+ /// The LLVM::CallingConv to actually use for this function, which may
+ /// depend on the ABI.
+ unsigned EffectiveCallingConvention;
+
+ /// Whether this function is noreturn.
+ bool NoReturn;
+
+ unsigned NumArgs;
+ ArgInfo *Args;
+
+ /// How many arguments to pass inreg.
+ unsigned RegParm;
+
+ public:
+ typedef const ArgInfo *const_arg_iterator;
+ typedef ArgInfo *arg_iterator;
+
+ CGFunctionInfo(unsigned CallingConvention, bool NoReturn,
+ unsigned RegParm, CanQualType ResTy,
+ const CanQualType *ArgTys, unsigned NumArgTys);
+ ~CGFunctionInfo() { delete[] Args; }
+
+ const_arg_iterator arg_begin() const { return Args + 1; }
+ const_arg_iterator arg_end() const { return Args + 1 + NumArgs; }
+ arg_iterator arg_begin() { return Args + 1; }
+ arg_iterator arg_end() { return Args + 1 + NumArgs; }
+
+ unsigned arg_size() const { return NumArgs; }
+
+ bool isNoReturn() const { return NoReturn; }
+
+ /// getCallingConvention - Return the user specified calling
+ /// convention.
+ unsigned getCallingConvention() const { return CallingConvention; }
+
+ /// getEffectiveCallingConvention - Return the actual calling convention to
+ /// use, which may depend on the ABI.
+ unsigned getEffectiveCallingConvention() const {
+ return EffectiveCallingConvention;
+ }
+ void setEffectiveCallingConvention(unsigned Value) {
+ EffectiveCallingConvention = Value;
+ }
+
+ unsigned getRegParm() const { return RegParm; }
+
+ CanQualType getReturnType() const { return Args[0].type; }
+
+ ABIArgInfo &getReturnInfo() { return Args[0].info; }
+ const ABIArgInfo &getReturnInfo() const { return Args[0].info; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ ID.AddInteger(getCallingConvention());
+ ID.AddBoolean(NoReturn);
+ ID.AddInteger(RegParm);
+ getReturnType().Profile(ID);
+ for (arg_iterator it = arg_begin(), ie = arg_end(); it != ie; ++it)
+ it->type.Profile(ID);
+ }
+ template<class Iterator>
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ const FunctionType::ExtInfo &Info,
+ CanQualType ResTy,
+ Iterator begin,
+ Iterator end) {
+ ID.AddInteger(Info.getCC());
+ ID.AddBoolean(Info.getNoReturn());
+ ID.AddInteger(Info.getRegParm());
+ ResTy.Profile(ID);
+ for (; begin != end; ++begin) {
+ CanQualType T = *begin; // force iterator to be over canonical types
+ T.Profile(ID);
+ }
+ }
+ };
+
+ /// ReturnValueSlot - Contains the address where the return value of a
+ /// function can be stored, and whether the address is volatile or not.
+ class ReturnValueSlot {
+ llvm::PointerIntPair<llvm::Value *, 1, bool> Value;
+
+ public:
+ ReturnValueSlot() {}
+ ReturnValueSlot(llvm::Value *Value, bool IsVolatile)
+ : Value(Value, IsVolatile) {}
+
+ bool isNull() const { return !getValue(); }
+
+ bool isVolatile() const { return Value.getInt(); }
+ llvm::Value *getValue() const { return Value.getPointer(); }
+ };
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
new file mode 100644
index 0000000..bf26799
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
@@ -0,0 +1,1363 @@
+//===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of classes
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtCXX.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+static uint64_t
+ComputeNonVirtualBaseClassOffset(ASTContext &Context,
+ const CXXRecordDecl *DerivedClass,
+ CastExpr::path_const_iterator Start,
+ CastExpr::path_const_iterator End) {
+ uint64_t Offset = 0;
+
+ const CXXRecordDecl *RD = DerivedClass;
+
+ for (CastExpr::path_const_iterator I = Start; I != End; ++I) {
+ const CXXBaseSpecifier *Base = *I;
+ assert(!Base->isVirtual() && "Should not see virtual bases here!");
+
+ // Get the layout.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ // Add the offset.
+ Offset += Layout.getBaseClassOffset(BaseDecl);
+
+ RD = BaseDecl;
+ }
+
+ // FIXME: We should not use / 8 here.
+ return Offset / 8;
+}
+
+llvm::Constant *
+CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd) {
+ assert(PathBegin != PathEnd && "Base path should not be empty!");
+
+ uint64_t Offset =
+ ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
+ PathBegin, PathEnd);
+ if (!Offset)
+ return 0;
+
+ const llvm::Type *PtrDiffTy =
+ Types.ConvertType(getContext().getPointerDiffType());
+
+ return llvm::ConstantInt::get(PtrDiffTy, Offset);
+}
+
+/// Gets the address of a direct base class within a complete object.
+/// This should only be used for (1) non-virtual bases or (2) virtual bases
+/// when the type is known to be complete (e.g. in complete destructors).
+///
+/// The object pointed to by 'This' is assumed to be non-null.
+llvm::Value *
+CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
+ const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Base,
+ bool BaseIsVirtual) {
+ // 'this' must be a pointer (in some address space) to Derived.
+ assert(This->getType()->isPointerTy() &&
+ cast<llvm::PointerType>(This->getType())->getElementType()
+ == ConvertType(Derived));
+
+ // Compute the offset of the virtual base.
+ uint64_t Offset;
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
+ if (BaseIsVirtual)
+ Offset = Layout.getVBaseClassOffset(Base);
+ else
+ Offset = Layout.getBaseClassOffset(Base);
+
+ // Shift and cast down to the base type.
+ // TODO: for complete types, this should be possible with a GEP.
+ llvm::Value *V = This;
+ if (Offset) {
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
+ V = Builder.CreateBitCast(V, Int8PtrTy);
+ V = Builder.CreateConstInBoundsGEP1_64(V, Offset / 8);
+ }
+ V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());
+
+ return V;
+}
+
+static llvm::Value *
+ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ThisPtr,
+ uint64_t NonVirtual, llvm::Value *Virtual) {
+ const llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+
+ llvm::Value *NonVirtualOffset = 0;
+ if (NonVirtual)
+ NonVirtualOffset = llvm::ConstantInt::get(PtrDiffTy, NonVirtual);
+
+ llvm::Value *BaseOffset;
+ if (Virtual) {
+ if (NonVirtualOffset)
+ BaseOffset = CGF.Builder.CreateAdd(Virtual, NonVirtualOffset);
+ else
+ BaseOffset = Virtual;
+ } else
+ BaseOffset = NonVirtualOffset;
+
+ // Apply the base offset.
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ ThisPtr = CGF.Builder.CreateBitCast(ThisPtr, Int8PtrTy);
+ ThisPtr = CGF.Builder.CreateGEP(ThisPtr, BaseOffset, "add.ptr");
+
+ return ThisPtr;
+}
+
+llvm::Value *
+CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
+ bool NullCheckValue) {
+ assert(PathBegin != PathEnd && "Base path should not be empty!");
+
+ CastExpr::path_const_iterator Start = PathBegin;
+ const CXXRecordDecl *VBase = 0;
+
+ // Get the virtual base.
+ if ((*Start)->isVirtual()) {
+ VBase =
+ cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
+ ++Start;
+ }
+
+ uint64_t NonVirtualOffset =
+ ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
+ Start, PathEnd);
+
+ // Get the base pointer type.
+ const llvm::Type *BasePtrTy =
+ ConvertType((PathEnd[-1])->getType())->getPointerTo();
+
+ if (!NonVirtualOffset && !VBase) {
+ // Just cast back.
+ return Builder.CreateBitCast(Value, BasePtrTy);
+ }
+
+ llvm::BasicBlock *CastNull = 0;
+ llvm::BasicBlock *CastNotNull = 0;
+ llvm::BasicBlock *CastEnd = 0;
+
+ if (NullCheckValue) {
+ CastNull = createBasicBlock("cast.null");
+ CastNotNull = createBasicBlock("cast.notnull");
+ CastEnd = createBasicBlock("cast.end");
+
+ llvm::Value *IsNull =
+ Builder.CreateICmpEQ(Value,
+ llvm::Constant::getNullValue(Value->getType()));
+ Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
+ EmitBlock(CastNotNull);
+ }
+
+ llvm::Value *VirtualOffset = 0;
+
+ if (VBase)
+ VirtualOffset = GetVirtualBaseClassOffset(Value, Derived, VBase);
+
+ // Apply the offsets.
+ Value = ApplyNonVirtualAndVirtualOffset(*this, Value, NonVirtualOffset,
+ VirtualOffset);
+
+ // Cast back.
+ Value = Builder.CreateBitCast(Value, BasePtrTy);
+
+ if (NullCheckValue) {
+ Builder.CreateBr(CastEnd);
+ EmitBlock(CastNull);
+ Builder.CreateBr(CastEnd);
+ EmitBlock(CastEnd);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(Value->getType());
+ PHI->reserveOperandSpace(2);
+ PHI->addIncoming(Value, CastNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
+ CastNull);
+ Value = PHI;
+ }
+
+ return Value;
+}
+
+llvm::Value *
+CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
+ bool NullCheckValue) {
+ assert(PathBegin != PathEnd && "Base path should not be empty!");
+
+ QualType DerivedTy =
+ getContext().getCanonicalType(getContext().getTagDeclType(Derived));
+ const llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();
+
+ llvm::Value *NonVirtualOffset =
+ CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);
+
+ if (!NonVirtualOffset) {
+ // No offset, we can just cast back.
+ return Builder.CreateBitCast(Value, DerivedPtrTy);
+ }
+
+ llvm::BasicBlock *CastNull = 0;
+ llvm::BasicBlock *CastNotNull = 0;
+ llvm::BasicBlock *CastEnd = 0;
+
+ if (NullCheckValue) {
+ CastNull = createBasicBlock("cast.null");
+ CastNotNull = createBasicBlock("cast.notnull");
+ CastEnd = createBasicBlock("cast.end");
+
+ llvm::Value *IsNull =
+ Builder.CreateICmpEQ(Value,
+ llvm::Constant::getNullValue(Value->getType()));
+ Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
+ EmitBlock(CastNotNull);
+ }
+
+ // Apply the offset.
+ Value = Builder.CreatePtrToInt(Value, NonVirtualOffset->getType());
+ Value = Builder.CreateSub(Value, NonVirtualOffset);
+ Value = Builder.CreateIntToPtr(Value, DerivedPtrTy);
+
+ // Just cast.
+ Value = Builder.CreateBitCast(Value, DerivedPtrTy);
+
+ if (NullCheckValue) {
+ Builder.CreateBr(CastEnd);
+ EmitBlock(CastNull);
+ Builder.CreateBr(CastEnd);
+ EmitBlock(CastEnd);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(Value->getType());
+ PHI->reserveOperandSpace(2);
+ PHI->addIncoming(Value, CastNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
+ CastNull);
+ Value = PHI;
+ }
+
+ return Value;
+}
+
+/// GetVTTParameter - Return the VTT parameter that should be passed to a
+/// base constructor/destructor with virtual bases.
+static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD,
+ bool ForVirtualBase) {
+ if (!CodeGenVTables::needsVTTParameter(GD)) {
+ // This constructor/destructor does not need a VTT parameter.
+ return 0;
+ }
+
+ const CXXRecordDecl *RD = cast<CXXMethodDecl>(CGF.CurFuncDecl)->getParent();
+ const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();
+
+ llvm::Value *VTT;
+
+ uint64_t SubVTTIndex;
+
+ // If the record matches the base, this is the complete ctor/dtor
+ // variant calling the base variant in a class with virtual bases.
+ if (RD == Base) {
+ assert(!CodeGenVTables::needsVTTParameter(CGF.CurGD) &&
+ "doing no-op VTT offset in base dtor/ctor?");
+ assert(!ForVirtualBase && "Can't have same class as virtual base!");
+ SubVTTIndex = 0;
+ } else {
+ const ASTRecordLayout &Layout =
+ CGF.getContext().getASTRecordLayout(RD);
+ uint64_t BaseOffset = ForVirtualBase ?
+ Layout.getVBaseClassOffset(Base) : Layout.getBaseClassOffset(Base);
+
+ SubVTTIndex =
+ CGF.CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
+ assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
+ }
+
+ if (CodeGenVTables::needsVTTParameter(CGF.CurGD)) {
+ // A VTT parameter was passed to the constructor, use it.
+ VTT = CGF.LoadCXXVTT();
+ VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
+ } else {
+ // We're the complete constructor, so get the VTT by name.
+ VTT = CGF.CGM.getVTables().getVTT(RD);
+ VTT = CGF.Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
+ }
+
+ return VTT;
+}
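As a rough illustration of why a VTT parameter is needed at all (the class names below are invented for this sketch, not taken from the patch): given

    struct V { virtual ~V(); };
    struct A : virtual V { A(); };
    struct B : A         { B(); };

A's base-object constructor cannot know where V will live in the most-derived object, so B's complete-object constructor hands it an entry from B's VTT (a table of vtable address points). The code above computes which entry to pass (SubVTTIndex), taking it either from the VTT parameter the current constructor itself received or from the named VTT of the complete class.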
+
+namespace {
+ /// Call the destructor for a direct base class.
+ struct CallBaseDtor : EHScopeStack::Cleanup {
+ const CXXRecordDecl *BaseClass;
+ bool BaseIsVirtual;
+ CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
+ : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ const CXXRecordDecl *DerivedClass =
+ cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();
+
+ const CXXDestructorDecl *D = BaseClass->getDestructor();
+ llvm::Value *Addr =
+ CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(),
+ DerivedClass, BaseClass,
+ BaseIsVirtual);
+ CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual, Addr);
+ }
+ };
+}
+
+static void EmitBaseInitializer(CodeGenFunction &CGF,
+ const CXXRecordDecl *ClassDecl,
+ CXXBaseOrMemberInitializer *BaseInit,
+ CXXCtorType CtorType) {
+ assert(BaseInit->isBaseInitializer() &&
+ "Must have base initializer!");
+
+ llvm::Value *ThisPtr = CGF.LoadCXXThis();
+
+ const Type *BaseType = BaseInit->getBaseClass();
+ CXXRecordDecl *BaseClassDecl =
+ cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
+
+ bool isBaseVirtual = BaseInit->isBaseVirtual();
+
+ // The base constructor doesn't construct virtual bases.
+ if (CtorType == Ctor_Base && isBaseVirtual)
+ return;
+
+ // We can pretend to be a complete class because it only matters for
+ // virtual bases, and we only do virtual bases for complete ctors.
+ llvm::Value *V =
+ CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
+ BaseClassDecl,
+ isBaseVirtual);
+
+ CGF.EmitAggExpr(BaseInit->getInit(), V, false, false, true);
+
+ if (CGF.Exceptions && !BaseClassDecl->hasTrivialDestructor())
+ CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
+ isBaseVirtual);
+}
+
+static void EmitAggMemberInitializer(CodeGenFunction &CGF,
+ LValue LHS,
+ llvm::Value *ArrayIndexVar,
+ CXXBaseOrMemberInitializer *MemberInit,
+ QualType T,
+ unsigned Index) {
+ if (Index == MemberInit->getNumArrayIndices()) {
+ CodeGenFunction::RunCleanupsScope Cleanups(CGF);
+
+ llvm::Value *Dest = LHS.getAddress();
+ if (ArrayIndexVar) {
+ // If we have an array index variable, load it and use it as an offset.
+ // Then, increment the value.
+ llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
+ Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
+ llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
+ Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
+ CGF.Builder.CreateStore(Next, ArrayIndexVar);
+ }
+
+ CGF.EmitAggExpr(MemberInit->getInit(), Dest,
+ LHS.isVolatileQualified(),
+ /*IgnoreResult*/ false,
+ /*IsInitializer*/ true);
+
+ return;
+ }
+
+ const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
+ assert(Array && "Array initialization without the array type?");
+ llvm::Value *IndexVar
+ = CGF.GetAddrOfLocalVar(MemberInit->getArrayIndex(Index));
+ assert(IndexVar && "Array index variable not loaded");
+
+ // Initialize this index variable to zero.
+ llvm::Value* Zero
+ = llvm::Constant::getNullValue(
+ CGF.ConvertType(CGF.getContext().getSizeType()));
+ CGF.Builder.CreateStore(Zero, IndexVar);
+
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");
+
+ CGF.EmitBlock(CondBlock);
+
+ llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
+ // Generate: if (loop-index < number-of-elements) fall to the loop body,
+ // otherwise, go to the block after the for-loop.
+ uint64_t NumElements = Array->getSize().getZExtValue();
+ llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
+ llvm::Value *NumElementsPtr =
+ llvm::ConstantInt::get(Counter->getType(), NumElements);
+ llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr,
+ "isless");
+
+ // If the condition is true, execute the body.
+ CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);
+
+ CGF.EmitBlock(ForBody);
+ llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");
+
+ {
+ CodeGenFunction::RunCleanupsScope Cleanups(CGF);
+
+ // Inside the loop body recurse to emit the inner loop or, eventually, the
+ // constructor call.
+ EmitAggMemberInitializer(CGF, LHS, ArrayIndexVar, MemberInit,
+ Array->getElementType(), Index + 1);
+ }
+
+ CGF.EmitBlock(ContinueBlock);
+
+ // Emit the increment of the loop counter.
+ llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
+ Counter = CGF.Builder.CreateLoad(IndexVar);
+ NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
+ CGF.Builder.CreateStore(NextVal, IndexVar);
+
+ // Finally, branch back up to the condition for the next iteration.
+ CGF.EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ CGF.EmitBlock(AfterFor, true);
+}
+
+namespace {
+ struct CallMemberDtor : EHScopeStack::Cleanup {
+ FieldDecl *Field;
+ CXXDestructorDecl *Dtor;
+
+ CallMemberDtor(FieldDecl *Field, CXXDestructorDecl *Dtor)
+ : Field(Field), Dtor(Dtor) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ // FIXME: Is this OK for C++0x delegating constructors?
+ llvm::Value *ThisPtr = CGF.LoadCXXThis();
+ LValue LHS = CGF.EmitLValueForField(ThisPtr, Field, 0);
+
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
+ LHS.getAddress());
+ }
+ };
+}
+
+static void EmitMemberInitializer(CodeGenFunction &CGF,
+ const CXXRecordDecl *ClassDecl,
+ CXXBaseOrMemberInitializer *MemberInit,
+ const CXXConstructorDecl *Constructor,
+ FunctionArgList &Args) {
+ assert(MemberInit->isMemberInitializer() &&
+ "Must have member initializer!");
+
+ // non-static data member initializers.
+ FieldDecl *Field = MemberInit->getMember();
+ QualType FieldType = CGF.getContext().getCanonicalType(Field->getType());
+
+ llvm::Value *ThisPtr = CGF.LoadCXXThis();
+ LValue LHS;
+
+ // If we are initializing an anonymous union field, drill down to the field.
+ if (MemberInit->getAnonUnionMember()) {
+ Field = MemberInit->getAnonUnionMember();
+ LHS = CGF.EmitLValueForAnonRecordField(ThisPtr, Field, 0);
+ FieldType = Field->getType();
+ } else {
+ LHS = CGF.EmitLValueForFieldInitialization(ThisPtr, Field, 0);
+ }
+
+ // FIXME: If there's no initializer and the CXXBaseOrMemberInitializer
+ // was implicitly generated, we shouldn't be zeroing memory.
+ RValue RHS;
+ if (FieldType->isReferenceType()) {
+ RHS = CGF.EmitReferenceBindingToExpr(MemberInit->getInit(), Field);
+ CGF.EmitStoreThroughLValue(RHS, LHS, FieldType);
+ } else if (FieldType->isArrayType() && !MemberInit->getInit()) {
+ CGF.EmitNullInitialization(LHS.getAddress(), Field->getType());
+ } else if (!CGF.hasAggregateLLVMType(Field->getType())) {
+ RHS = RValue::get(CGF.EmitScalarExpr(MemberInit->getInit()));
+ CGF.EmitStoreThroughLValue(RHS, LHS, FieldType);
+ } else if (MemberInit->getInit()->getType()->isAnyComplexType()) {
+ CGF.EmitComplexExprIntoAddr(MemberInit->getInit(), LHS.getAddress(),
+ LHS.isVolatileQualified());
+ } else {
+ llvm::Value *ArrayIndexVar = 0;
+ const ConstantArrayType *Array
+ = CGF.getContext().getAsConstantArrayType(FieldType);
+ if (Array && Constructor->isImplicit() &&
+ Constructor->isCopyConstructor()) {
+ const llvm::Type *SizeTy
+ = CGF.ConvertType(CGF.getContext().getSizeType());
+
+ // The LHS is a pointer to the first object we'll be constructing, as
+ // a flat array.
+ QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
+ const llvm::Type *BasePtr = CGF.ConvertType(BaseElementTy);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr = CGF.Builder.CreateBitCast(LHS.getAddress(),
+ BasePtr);
+ LHS = CGF.MakeAddrLValue(BaseAddrPtr, BaseElementTy);
+
+ // Create an array index that will be used to walk over all of the
+ // objects we're constructing.
+ ArrayIndexVar = CGF.CreateTempAlloca(SizeTy, "object.index");
+ llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
+ CGF.Builder.CreateStore(Zero, ArrayIndexVar);
+
+ // If we are copying an array of scalars or classes with trivial copy
+ // constructors, perform a single aggregate copy.
+ const RecordType *Record = BaseElementTy->getAs<RecordType>();
+ if (!Record ||
+ cast<CXXRecordDecl>(Record->getDecl())->hasTrivialCopyConstructor()) {
+        // Find the source pointer. We know it's the last argument because
+ // we know we're in a copy constructor.
+ unsigned SrcArgIndex = Args.size() - 1;
+ llvm::Value *SrcPtr
+ = CGF.Builder.CreateLoad(
+ CGF.GetAddrOfLocalVar(Args[SrcArgIndex].first));
+ LValue Src = CGF.EmitLValueForFieldInitialization(SrcPtr, Field, 0);
+
+ // Copy the aggregate.
+ CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
+ LHS.isVolatileQualified());
+ return;
+ }
+
+ // Emit the block variables for the array indices, if any.
+ for (unsigned I = 0, N = MemberInit->getNumArrayIndices(); I != N; ++I)
+ CGF.EmitLocalBlockVarDecl(*MemberInit->getArrayIndex(I));
+ }
+
+ EmitAggMemberInitializer(CGF, LHS, ArrayIndexVar, MemberInit, FieldType, 0);
+
+ if (!CGF.Exceptions)
+ return;
+
+ // FIXME: If we have an array of classes w/ non-trivial destructors,
+ // we need to destroy in reverse order of construction along the exception
+ // path.
+ const RecordType *RT = FieldType->getAs<RecordType>();
+ if (!RT)
+ return;
+
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD->hasTrivialDestructor())
+ CGF.EHStack.pushCleanup<CallMemberDtor>(EHCleanup, Field,
+ RD->getDestructor());
+ }
+}
+
+/// Checks whether the given constructor is a valid subject for the
+/// complete-to-base constructor delegation optimization, i.e.
+/// emitting the complete constructor as a simple call to the base
+/// constructor.
+static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {
+
+ // Currently we disable the optimization for classes with virtual
+ // bases because (1) the addresses of parameter variables need to be
+ // consistent across all initializers but (2) the delegate function
+ // call necessarily creates a second copy of the parameter variable.
+ //
+ // The limiting example (purely theoretical AFAIK):
+ // struct A { A(int &c) { c++; } };
+ // struct B : virtual A {
+ // B(int count) : A(count) { printf("%d\n", count); }
+ // };
+ // ...although even this example could in principle be emitted as a
+ // delegation since the address of the parameter doesn't escape.
+ if (Ctor->getParent()->getNumVBases()) {
+ // TODO: white-list trivial vbase initializers. This case wouldn't
+ // be subject to the restrictions below.
+
+ // TODO: white-list cases where:
+ // - there are no non-reference parameters to the constructor
+ // - the initializers don't access any non-reference parameters
+ // - the initializers don't take the address of non-reference
+ // parameters
+ // - etc.
+ // If we ever add any of the above cases, remember that:
+ // - function-try-blocks will always blacklist this optimization
+ // - we need to perform the constructor prologue and cleanup in
+ // EmitConstructorBody.
+
+ return false;
+ }
+
+ // We also disable the optimization for variadic functions because
+ // it's impossible to "re-pass" varargs.
+ if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
+ return false;
+
+ return true;
+}
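A minimal sketch of the delegation this check enables (the helper names are invented for illustration; the real output is LLVM IR): for a class with no virtual bases and a non-variadic constructor, the complete-object variant can simply re-pass its arguments to the base-object variant,

    struct S { int x; S(int v); };

    // complete-object constructor, conceptually:
    void S_ctor_complete(S *self, int v) {
      S_ctor_base(self, v);   // forward everything to the base variant
    }

which is exactly what EmitConstructorBody does below when this predicate returns true.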
+
+/// EmitConstructorBody - Emits the body of the current constructor.
+void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
+ const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
+ CXXCtorType CtorType = CurGD.getCtorType();
+
+ // Before we go any further, try the complete->base constructor
+ // delegation optimization.
+ if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor)) {
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitStopPoint(Builder);
+ EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args);
+ return;
+ }
+
+ Stmt *Body = Ctor->getBody();
+
+ // Enter the function-try-block before the constructor prologue if
+ // applicable.
+ bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
+ if (IsTryBody)
+ EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
+
+ EHScopeStack::stable_iterator CleanupDepth = EHStack.stable_begin();
+
+ // Emit the constructor prologue, i.e. the base and member
+ // initializers.
+ EmitCtorPrologue(Ctor, CtorType, Args);
+
+ // Emit the body of the statement.
+ if (IsTryBody)
+ EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
+ else if (Body)
+ EmitStmt(Body);
+
+ // Emit any cleanup blocks associated with the member or base
+ // initializers, which includes (along the exceptional path) the
+ // destructors for those members and bases that were fully
+ // constructed.
+ PopCleanupBlocks(CleanupDepth);
+
+ if (IsTryBody)
+ ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
+}
+
+/// EmitCtorPrologue - This routine generates necessary code to initialize
+/// base classes and non-static data members belonging to this constructor.
+void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
+ CXXCtorType CtorType,
+ FunctionArgList &Args) {
+ const CXXRecordDecl *ClassDecl = CD->getParent();
+
+ llvm::SmallVector<CXXBaseOrMemberInitializer *, 8> MemberInitializers;
+
+ for (CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
+ E = CD->init_end();
+ B != E; ++B) {
+ CXXBaseOrMemberInitializer *Member = (*B);
+
+ if (Member->isBaseInitializer())
+ EmitBaseInitializer(*this, ClassDecl, Member, CtorType);
+ else
+ MemberInitializers.push_back(Member);
+ }
+
+ InitializeVTablePointers(ClassDecl);
+
+ for (unsigned I = 0, E = MemberInitializers.size(); I != E; ++I)
+ EmitMemberInitializer(*this, ClassDecl, MemberInitializers[I], CD, Args);
+}
+
+/// EmitDestructorBody - Emits the body of the current destructor.
+void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
+ const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
+ CXXDtorType DtorType = CurGD.getDtorType();
+
+ // The call to operator delete in a deleting destructor happens
+ // outside of the function-try-block, which means it's always
+ // possible to delegate the destructor body to the complete
+ // destructor. Do so.
+ if (DtorType == Dtor_Deleting) {
+ EnterDtorCleanups(Dtor, Dtor_Deleting);
+ EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
+ LoadCXXThis());
+ PopCleanupBlock();
+ return;
+ }
+
+ Stmt *Body = Dtor->getBody();
+
+ // If the body is a function-try-block, enter the try before
+ // anything else.
+ bool isTryBody = (Body && isa<CXXTryStmt>(Body));
+ if (isTryBody)
+ EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
+
+ // Enter the epilogue cleanups.
+ RunCleanupsScope DtorEpilogue(*this);
+
+ // If this is the complete variant, just invoke the base variant;
+ // the epilogue will destruct the virtual bases. But we can't do
+ // this optimization if the body is a function-try-block, because
+ // we'd introduce *two* handler blocks.
+ switch (DtorType) {
+ case Dtor_Deleting: llvm_unreachable("already handled deleting case");
+
+ case Dtor_Complete:
+ // Enter the cleanup scopes for virtual bases.
+ EnterDtorCleanups(Dtor, Dtor_Complete);
+
+ if (!isTryBody) {
+ EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
+ LoadCXXThis());
+ break;
+ }
+ // Fallthrough: act like we're in the base variant.
+
+ case Dtor_Base:
+ // Enter the cleanup scopes for fields and non-virtual bases.
+ EnterDtorCleanups(Dtor, Dtor_Base);
+
+ // Initialize the vtable pointers before entering the body.
+ InitializeVTablePointers(Dtor->getParent());
+
+ if (isTryBody)
+ EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
+ else if (Body)
+ EmitStmt(Body);
+ else {
+ assert(Dtor->isImplicit() && "bodyless dtor not implicit");
+ // nothing to do besides what's in the epilogue
+ }
+ break;
+ }
+
+ // Jump out through the epilogue cleanups.
+ DtorEpilogue.ForceCleanup();
+
+ // Exit the try if applicable.
+ if (isTryBody)
+ ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
+}
+
+namespace {
+ /// Call the operator delete associated with the current destructor.
+ struct CallDtorDelete : EHScopeStack::Cleanup {
+ CallDtorDelete() {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
+ const CXXRecordDecl *ClassDecl = Dtor->getParent();
+ CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
+ CGF.getContext().getTagDeclType(ClassDecl));
+ }
+ };
+
+ struct CallArrayFieldDtor : EHScopeStack::Cleanup {
+ const FieldDecl *Field;
+ CallArrayFieldDtor(const FieldDecl *Field) : Field(Field) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ QualType FieldType = Field->getType();
+ const ConstantArrayType *Array =
+ CGF.getContext().getAsConstantArrayType(FieldType);
+
+ QualType BaseType =
+ CGF.getContext().getBaseElementType(Array->getElementType());
+ const CXXRecordDecl *FieldClassDecl = BaseType->getAsCXXRecordDecl();
+
+ llvm::Value *ThisPtr = CGF.LoadCXXThis();
+ LValue LHS = CGF.EmitLValueForField(ThisPtr, Field,
+ // FIXME: Qualifiers?
+ /*CVRQualifiers=*/0);
+
+ const llvm::Type *BasePtr = CGF.ConvertType(BaseType)->getPointerTo();
+ llvm::Value *BaseAddrPtr =
+ CGF.Builder.CreateBitCast(LHS.getAddress(), BasePtr);
+ CGF.EmitCXXAggrDestructorCall(FieldClassDecl->getDestructor(),
+ Array, BaseAddrPtr);
+ }
+ };
+
+ struct CallFieldDtor : EHScopeStack::Cleanup {
+ const FieldDecl *Field;
+ CallFieldDtor(const FieldDecl *Field) : Field(Field) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ const CXXRecordDecl *FieldClassDecl =
+ Field->getType()->getAsCXXRecordDecl();
+
+ llvm::Value *ThisPtr = CGF.LoadCXXThis();
+ LValue LHS = CGF.EmitLValueForField(ThisPtr, Field,
+ // FIXME: Qualifiers?
+ /*CVRQualifiers=*/0);
+
+ CGF.EmitCXXDestructorCall(FieldClassDecl->getDestructor(),
+ Dtor_Complete, /*ForVirtualBase=*/false,
+ LHS.getAddress());
+ }
+ };
+}
+
+/// EnterDtorCleanups - Enter the cleanups needed at the end of a class's
+/// destructor, i.e. the calls to destructors of members and base classes
+/// in reverse order of their construction.
+void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
+ CXXDtorType DtorType) {
+ assert(!DD->isTrivial() &&
+ "Should not emit dtor epilogue for trivial dtor!");
+
+ // The deleting-destructor phase just needs to call the appropriate
+ // operator delete that Sema picked up.
+ if (DtorType == Dtor_Deleting) {
+ assert(DD->getOperatorDelete() &&
+ "operator delete missing - EmitDtorEpilogue");
+ EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
+ return;
+ }
+
+ const CXXRecordDecl *ClassDecl = DD->getParent();
+
+ // The complete-destructor phase just destructs all the virtual bases.
+ if (DtorType == Dtor_Complete) {
+
+ // We push them in the forward order so that they'll be popped in
+ // the reverse order.
+ for (CXXRecordDecl::base_class_const_iterator I =
+ ClassDecl->vbases_begin(), E = ClassDecl->vbases_end();
+ I != E; ++I) {
+ const CXXBaseSpecifier &Base = *I;
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore trivial destructors.
+ if (BaseClassDecl->hasTrivialDestructor())
+ continue;
+
+ EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
+ BaseClassDecl,
+ /*BaseIsVirtual*/ true);
+ }
+
+ return;
+ }
+
+ assert(DtorType == Dtor_Base);
+
+ // Destroy non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I =
+ ClassDecl->bases_begin(), E = ClassDecl->bases_end(); I != E; ++I) {
+ const CXXBaseSpecifier &Base = *I;
+
+ // Ignore virtual bases.
+ if (Base.isVirtual())
+ continue;
+
+ CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();
+
+ // Ignore trivial destructors.
+ if (BaseClassDecl->hasTrivialDestructor())
+ continue;
+
+ EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
+ BaseClassDecl,
+ /*BaseIsVirtual*/ false);
+ }
+
+ // Destroy direct fields.
+ llvm::SmallVector<const FieldDecl *, 16> FieldDecls;
+ for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
+ E = ClassDecl->field_end(); I != E; ++I) {
+ const FieldDecl *Field = *I;
+
+ QualType FieldType = getContext().getCanonicalType(Field->getType());
+ const ConstantArrayType *Array =
+ getContext().getAsConstantArrayType(FieldType);
+ if (Array)
+ FieldType = getContext().getBaseElementType(Array->getElementType());
+
+ const RecordType *RT = FieldType->getAs<RecordType>();
+ if (!RT)
+ continue;
+
+ CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (FieldClassDecl->hasTrivialDestructor())
+ continue;
+
+ if (Array)
+ EHStack.pushCleanup<CallArrayFieldDtor>(NormalAndEHCleanup, Field);
+ else
+ EHStack.pushCleanup<CallFieldDtor>(NormalAndEHCleanup, Field);
+ }
+}
+
+/// EmitCXXAggrConstructorCall - This routine essentially creates a (nested)
+/// for-loop to call the default constructor on individual members of the
+/// array.
+/// 'D' is the default constructor for elements of the array, 'ArrayTy' is the
+/// array type and 'ArrayPtr' points to the beginning of the array.
+/// It is assumed that all relevant checks have been made by the caller.
+///
+/// \param ZeroInitialization True if each element should be zero-initialized
+/// before it is constructed.
+void
+CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
+ const ConstantArrayType *ArrayTy,
+ llvm::Value *ArrayPtr,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd,
+ bool ZeroInitialization) {
+
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Value * NumElements =
+ llvm::ConstantInt::get(SizeTy,
+ getContext().getConstantArrayElementCount(ArrayTy));
+
+ EmitCXXAggrConstructorCall(D, NumElements, ArrayPtr, ArgBeg, ArgEnd,
+ ZeroInitialization);
+}
+
+void
+CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
+ llvm::Value *NumElements,
+ llvm::Value *ArrayPtr,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd,
+ bool ZeroInitialization) {
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+
+ // Create a temporary for the loop index and initialize it with 0.
+ llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
+ llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
+ Builder.CreateStore(Zero, IndexPtr);
+
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
+
+ EmitBlock(CondBlock);
+
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+  // Generate: if (loop-index < number-of-elements) fall to the loop body,
+ // otherwise, go to the block after the for-loop.
+ llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
+ llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
+ // If the condition is true, execute the body.
+ Builder.CreateCondBr(IsLess, ForBody, AfterFor);
+
+ EmitBlock(ForBody);
+
+ llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
+ // Inside the loop body, emit the constructor call on the array element.
+ Counter = Builder.CreateLoad(IndexPtr);
+ llvm::Value *Address = Builder.CreateInBoundsGEP(ArrayPtr, Counter,
+ "arrayidx");
+
+ // Zero initialize the storage, if requested.
+ if (ZeroInitialization)
+ EmitNullInitialization(Address,
+ getContext().getTypeDeclType(D->getParent()));
+
+ // C++ [class.temporary]p4:
+ // There are two contexts in which temporaries are destroyed at a different
+ // point than the end of the full-expression. The first context is when a
+ // default constructor is called to initialize an element of an array.
+ // If the constructor has one or more default arguments, the destruction of
+ // every temporary created in a default argument expression is sequenced
+ // before the construction of the next array element, if any.
+
+ // Keep track of the current number of live temporaries.
+ {
+ RunCleanupsScope Scope(*this);
+
+ EmitCXXConstructorCall(D, Ctor_Complete, /*ForVirtualBase=*/false, Address,
+ ArgBeg, ArgEnd);
+ }
+
+ EmitBlock(ContinueBlock);
+
+ // Emit the increment of the loop counter.
+ llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
+ Counter = Builder.CreateLoad(IndexPtr);
+ NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
+ Builder.CreateStore(NextVal, IndexPtr);
+
+ // Finally, branch back up to the condition for the next iteration.
+ EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ EmitBlock(AfterFor, true);
+}
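At the C++ level, the loop emitted above corresponds roughly to the following (illustrative only; 'T', 'arr', 'n' and 'args' are placeholders, and the actual output is LLVM IR using the for.cond/for.body/for.inc blocks created by this function):

    for (size_t i = 0; i != n; ++i) {
      // optional zero-initialization of arr[i]
      new (&arr[i]) T(args...);   // per-element cleanup scope: default-argument
                                  // temporaries are destroyed before arr[i+1]
    }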
+
+/// EmitCXXAggrDestructorCall - calls the default destructor on array
+/// elements in reverse order of construction.
+void
+CodeGenFunction::EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
+ const ArrayType *Array,
+ llvm::Value *This) {
+ const ConstantArrayType *CA = dyn_cast<ConstantArrayType>(Array);
+ assert(CA && "Do we support VLA for destruction ?");
+ uint64_t ElementCount = getContext().getConstantArrayElementCount(CA);
+
+ const llvm::Type *SizeLTy = ConvertType(getContext().getSizeType());
+ llvm::Value* ElementCountPtr = llvm::ConstantInt::get(SizeLTy, ElementCount);
+ EmitCXXAggrDestructorCall(D, ElementCountPtr, This);
+}
+
+/// EmitCXXAggrDestructorCall - calls the default destructor on array
+/// elements in reverse order of construction.
+void
+CodeGenFunction::EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
+ llvm::Value *UpperCount,
+ llvm::Value *This) {
+ const llvm::Type *SizeLTy = ConvertType(getContext().getSizeType());
+ llvm::Value *One = llvm::ConstantInt::get(SizeLTy, 1);
+
+ // Create a temporary for the loop index and initialize it with count of
+ // array elements.
+ llvm::Value *IndexPtr = CreateTempAlloca(SizeLTy, "loop.index");
+
+ // Store the number of elements in the index pointer.
+ Builder.CreateStore(UpperCount, IndexPtr);
+
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
+
+ EmitBlock(CondBlock);
+
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+  // Generate: if (loop-index != 0) fall to the loop body,
+ // otherwise, go to the block after the for-loop.
+ llvm::Value* zeroConstant =
+ llvm::Constant::getNullValue(SizeLTy);
+ llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
+ llvm::Value *IsNE = Builder.CreateICmpNE(Counter, zeroConstant,
+ "isne");
+ // If the condition is true, execute the body.
+ Builder.CreateCondBr(IsNE, ForBody, AfterFor);
+
+ EmitBlock(ForBody);
+
+ llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
+  // Inside the loop body, emit the destructor call on the array element.
+ Counter = Builder.CreateLoad(IndexPtr);
+ Counter = Builder.CreateSub(Counter, One);
+ llvm::Value *Address = Builder.CreateInBoundsGEP(This, Counter, "arrayidx");
+ EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false, Address);
+
+ EmitBlock(ContinueBlock);
+
+ // Emit the decrement of the loop counter.
+ Counter = Builder.CreateLoad(IndexPtr);
+ Counter = Builder.CreateSub(Counter, One, "dec");
+ Builder.CreateStore(Counter, IndexPtr);
+
+ // Finally, branch back up to the condition for the next iteration.
+ EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ EmitBlock(AfterFor, true);
+}
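Its destruction counterpart just above walks the array in reverse; again only a C++-level sketch with the same placeholder names:

    for (size_t i = n; i != 0; --i)
      arr[i - 1].~T();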
+
+void
+CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
+ CXXCtorType Type, bool ForVirtualBase,
+ llvm::Value *This,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ if (D->isTrivial()) {
+ if (ArgBeg == ArgEnd) {
+ // Trivial default constructor, no codegen required.
+ assert(D->isDefaultConstructor() &&
+ "trivial 0-arg ctor not a default ctor");
+ return;
+ }
+
+ assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
+ assert(D->isCopyConstructor() && "trivial 1-arg ctor not a copy ctor");
+
+ const Expr *E = (*ArgBeg);
+ QualType Ty = E->getType();
+ llvm::Value *Src = EmitLValue(E).getAddress();
+ EmitAggregateCopy(This, Src, Ty);
+ return;
+ }
+
+ llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(D, Type), ForVirtualBase);
+ llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
+
+ EmitCXXMemberCall(D, Callee, ReturnValueSlot(), This, VTT, ArgBeg, ArgEnd);
+}
+
+void
+CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
+ CXXCtorType CtorType,
+ const FunctionArgList &Args) {
+ CallArgList DelegateArgs;
+
+ FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
+ assert(I != E && "no parameters to constructor");
+
+ // this
+ DelegateArgs.push_back(std::make_pair(RValue::get(LoadCXXThis()),
+ I->second));
+ ++I;
+
+ // vtt
+ if (llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(Ctor, CtorType),
+ /*ForVirtualBase=*/false)) {
+ QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
+ DelegateArgs.push_back(std::make_pair(RValue::get(VTT), VoidPP));
+
+ if (CodeGenVTables::needsVTTParameter(CurGD)) {
+ assert(I != E && "cannot skip vtt parameter, already done with args");
+ assert(I->second == VoidPP && "skipping parameter not of vtt type");
+ ++I;
+ }
+ }
+
+ // Explicit arguments.
+ for (; I != E; ++I) {
+ const VarDecl *Param = I->first;
+ QualType ArgType = Param->getType(); // because we're passing it to itself
+ RValue Arg = EmitDelegateCallArg(Param);
+
+ DelegateArgs.push_back(std::make_pair(Arg, ArgType));
+ }
+
+ EmitCall(CGM.getTypes().getFunctionInfo(Ctor, CtorType),
+ CGM.GetAddrOfCXXConstructor(Ctor, CtorType),
+ ReturnValueSlot(), DelegateArgs, Ctor);
+}
+
+void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ bool ForVirtualBase,
+ llvm::Value *This) {
+ llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(DD, Type),
+ ForVirtualBase);
+ llvm::Value *Callee = CGM.GetAddrOfCXXDestructor(DD, Type);
+
+ EmitCXXMemberCall(DD, Callee, ReturnValueSlot(), This, VTT, 0, 0);
+}
+
+namespace {
+ struct CallLocalDtor : EHScopeStack::Cleanup {
+ const CXXDestructorDecl *Dtor;
+ llvm::Value *Addr;
+
+ CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
+ : Dtor(D), Addr(Addr) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false, Addr);
+ }
+ };
+}
+
+void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
+ llvm::Value *Addr) {
+ EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
+}
+
+void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
+ CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
+ if (!ClassDecl) return;
+ if (ClassDecl->hasTrivialDestructor()) return;
+
+ const CXXDestructorDecl *D = ClassDecl->getDestructor();
+ PushDestructorCleanup(D, Addr);
+}
+
+llvm::Value *
+CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl) {
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8Ty(VMContext)->getPointerTo();
+
+ llvm::Value *VTablePtr = Builder.CreateBitCast(This,
+ Int8PtrTy->getPointerTo());
+ VTablePtr = Builder.CreateLoad(VTablePtr, "vtable");
+
+ int64_t VBaseOffsetOffset =
+ CGM.getVTables().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl);
+
+ llvm::Value *VBaseOffsetPtr =
+ Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset, "vbase.offset.ptr");
+ const llvm::Type *PtrDiffTy =
+ ConvertType(getContext().getPointerDiffType());
+
+ VBaseOffsetPtr = Builder.CreateBitCast(VBaseOffsetPtr,
+ PtrDiffTy->getPointerTo());
+
+ llvm::Value *VBaseOffset = Builder.CreateLoad(VBaseOffsetPtr, "vbase.offset");
+
+ return VBaseOffset;
+}
+
+void
+CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase,
+ uint64_t OffsetFromNearestVBase,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ // Compute the address point.
+ llvm::Value *VTableAddressPoint;
+
+ // Check if we need to use a vtable from the VTT.
+ if (CodeGenVTables::needsVTTParameter(CurGD) &&
+ (RD->getNumVBases() || NearestVBase)) {
+ // Get the secondary vpointer index.
+ uint64_t VirtualPointerIndex =
+ CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
+
+ /// Load the VTT.
+ llvm::Value *VTT = LoadCXXVTT();
+ if (VirtualPointerIndex)
+ VTT = Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
+
+ // And load the address point from the VTT.
+ VTableAddressPoint = Builder.CreateLoad(VTT);
+ } else {
+ uint64_t AddressPoint = CGM.getVTables().getAddressPoint(Base, VTableClass);
+ VTableAddressPoint =
+ Builder.CreateConstInBoundsGEP2_64(VTable, 0, AddressPoint);
+ }
+
+ // Compute where to store the address point.
+ llvm::Value *VirtualOffset = 0;
+ uint64_t NonVirtualOffset = 0;
+
+ if (CodeGenVTables::needsVTTParameter(CurGD) && NearestVBase) {
+ // We need to use the virtual base offset offset because the virtual base
+ // might have a different offset in the most derived class.
+ VirtualOffset = GetVirtualBaseClassOffset(LoadCXXThis(), VTableClass,
+ NearestVBase);
+ NonVirtualOffset = OffsetFromNearestVBase / 8;
+ } else {
+ // We can just use the base offset in the complete class.
+ NonVirtualOffset = Base.getBaseOffset() / 8;
+ }
+
+ // Apply the offsets.
+ llvm::Value *VTableField = LoadCXXThis();
+
+ if (NonVirtualOffset || VirtualOffset)
+ VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
+ NonVirtualOffset,
+ VirtualOffset);
+
+ // Finally, store the address point.
+ const llvm::Type *AddressPointPtrTy =
+ VTableAddressPoint->getType()->getPointerTo();
+ VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
+ Builder.CreateStore(VTableAddressPoint, VTableField);
+}
+
+void
+CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase,
+ uint64_t OffsetFromNearestVBase,
+ bool BaseIsNonVirtualPrimaryBase,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ VisitedVirtualBasesSetTy& VBases) {
+ // If this base is a non-virtual primary base the address point has already
+ // been set.
+ if (!BaseIsNonVirtualPrimaryBase) {
+ // Initialize the vtable pointer for this base.
+ InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
+ VTable, VTableClass);
+ }
+
+ const CXXRecordDecl *RD = Base.getBase();
+
+ // Traverse bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ CXXRecordDecl *BaseDecl
+ = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore classes without a vtable.
+ if (!BaseDecl->isDynamicClass())
+ continue;
+
+ uint64_t BaseOffset;
+ uint64_t BaseOffsetFromNearestVBase;
+ bool BaseDeclIsNonVirtualPrimaryBase;
+
+ if (I->isVirtual()) {
+ // Check if we've visited this virtual base before.
+ if (!VBases.insert(BaseDecl))
+ continue;
+
+ const ASTRecordLayout &Layout =
+ getContext().getASTRecordLayout(VTableClass);
+
+ BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
+ BaseOffsetFromNearestVBase = 0;
+ BaseDeclIsNonVirtualPrimaryBase = false;
+ } else {
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+
+ BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
+ BaseOffsetFromNearestVBase =
+ OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
+ BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
+ }
+
+ InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
+ I->isVirtual() ? BaseDecl : NearestVBase,
+ BaseOffsetFromNearestVBase,
+ BaseDeclIsNonVirtualPrimaryBase,
+ VTable, VTableClass, VBases);
+ }
+}
+
+void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
+ // Ignore classes without a vtable.
+ if (!RD->isDynamicClass())
+ return;
+
+ // Get the VTable.
+ llvm::Constant *VTable = CGM.getVTables().GetAddrOfVTable(RD);
+
+ // Initialize the vtable pointers for this class and all of its bases.
+ VisitedVirtualBasesSetTy VBases;
+ InitializeVTablePointers(BaseSubobject(RD, 0), /*NearestVBase=*/0,
+ /*OffsetFromNearestVBase=*/0,
+ /*BaseIsNonVirtualPrimaryBase=*/false,
+ VTable, RD, VBases);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
new file mode 100644
index 0000000..406db88
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -0,0 +1,1916 @@
+//===--- CGDebugInfo.cpp - Emit Debug Information for a Module ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the debug information generation while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/Version.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Instructions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/System/Path.h"
+#include "llvm/Target/TargetMachine.h"
+using namespace clang;
+using namespace clang::CodeGen;
+
+CGDebugInfo::CGDebugInfo(CodeGenModule &CGM)
+ : CGM(CGM), DebugFactory(CGM.getModule()),
+ BlockLiteralGenericSet(false) {
+ CreateCompileUnit();
+}
+
+CGDebugInfo::~CGDebugInfo() {
+ assert(RegionStack.empty() && "Region stack mismatch, stack not empty!");
+}
+
+void CGDebugInfo::setLocation(SourceLocation Loc) {
+ if (Loc.isValid())
+ CurLoc = CGM.getContext().getSourceManager().getInstantiationLoc(Loc);
+}
+
+/// getContextDescriptor - Get context info for the decl.
+llvm::DIDescriptor CGDebugInfo::getContextDescriptor(const Decl *Context,
+ llvm::DIDescriptor &CompileUnit) {
+ if (!Context)
+ return CompileUnit;
+
+ llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator
+ I = RegionMap.find(Context);
+ if (I != RegionMap.end())
+ return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(I->second));
+
+ // Check namespace.
+ if (const NamespaceDecl *NSDecl = dyn_cast<NamespaceDecl>(Context))
+ return llvm::DIDescriptor(getOrCreateNameSpace(NSDecl, CompileUnit));
+
+ if (const RecordDecl *RDecl = dyn_cast<RecordDecl>(Context)) {
+ if (!RDecl->isDependentType()) {
+ llvm::DIType Ty = getOrCreateType(CGM.getContext().getTypeDeclType(RDecl),
+ llvm::DIFile(CompileUnit));
+ return llvm::DIDescriptor(Ty);
+ }
+ }
+ return CompileUnit;
+}
+
+/// getFunctionName - Get function name for the given FunctionDecl. If the
+/// name is constructed on demand (e.g. C++ destructor) then the name
+/// is stored on the side.
+llvm::StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
+ assert (FD && "Invalid FunctionDecl!");
+ IdentifierInfo *FII = FD->getIdentifier();
+ if (FII)
+ return FII->getName();
+
+ // Otherwise construct human readable name for debug info.
+ std::string NS = FD->getNameAsString();
+
+ // Copy this name on the side and use its reference.
+ char *StrPtr = DebugInfoNames.Allocate<char>(NS.length());
+ memcpy(StrPtr, NS.data(), NS.length());
+ return llvm::StringRef(StrPtr, NS.length());
+}
+
+llvm::StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
+ llvm::SmallString<256> MethodName;
+ llvm::raw_svector_ostream OS(MethodName);
+ OS << (OMD->isInstanceMethod() ? '-' : '+') << '[';
+ const DeclContext *DC = OMD->getDeclContext();
+ if (const ObjCImplementationDecl *OID = dyn_cast<const ObjCImplementationDecl>(DC)) {
+ OS << OID->getName();
+ } else if (const ObjCCategoryImplDecl *OCD = dyn_cast<const ObjCCategoryImplDecl>(DC)){
+ OS << ((NamedDecl *)OCD)->getIdentifier()->getNameStart() << '(' <<
+ OCD->getIdentifier()->getNameStart() << ')';
+ }
+ OS << ' ' << OMD->getSelector().getAsString() << ']';
+
+ char *StrPtr = DebugInfoNames.Allocate<char>(OS.tell());
+ memcpy(StrPtr, MethodName.begin(), OS.tell());
+ return llvm::StringRef(StrPtr, OS.tell());
+}
+
+/// getClassName - Get class name including template argument list.
+llvm::StringRef
+CGDebugInfo::getClassName(RecordDecl *RD) {
+ ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(RD);
+ if (!Spec)
+ return RD->getName();
+
+ const TemplateArgument *Args;
+ unsigned NumArgs;
+ std::string Buffer;
+ if (TypeSourceInfo *TAW = Spec->getTypeAsWritten()) {
+ const TemplateSpecializationType *TST =
+ cast<TemplateSpecializationType>(TAW->getType());
+ Args = TST->getArgs();
+ NumArgs = TST->getNumArgs();
+ } else {
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ Args = TemplateArgs.getFlatArgumentList();
+ NumArgs = TemplateArgs.flat_size();
+ }
+ Buffer = RD->getIdentifier()->getNameStart();
+ PrintingPolicy Policy(CGM.getLangOptions());
+ Buffer += TemplateSpecializationType::PrintTemplateArgumentList(Args,
+ NumArgs,
+ Policy);
+
+ // Copy this name on the side and use its reference.
+ char *StrPtr = DebugInfoNames.Allocate<char>(Buffer.length());
+ memcpy(StrPtr, Buffer.data(), Buffer.length());
+ return llvm::StringRef(StrPtr, Buffer.length());
+
+}
+
+/// getOrCreateFile - Get the file debug info descriptor for the input location.
+llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
+ if (!Loc.isValid())
+ // If Location is not valid then use main input file.
+ return DebugFactory.CreateFile(TheCU.getFilename(), TheCU.getDirectory(),
+ TheCU);
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+
+ // Cache the results.
+ const char *fname = PLoc.getFilename();
+ llvm::DenseMap<const char *, llvm::WeakVH>::iterator it =
+ DIFileCache.find(fname);
+
+ if (it != DIFileCache.end()) {
+ // Verify that the information still exists.
+ if (&*it->second)
+ return llvm::DIFile(cast<llvm::MDNode>(it->second));
+ }
+
+ llvm::DIFile F = DebugFactory.CreateFile(PLoc.getFilename(),
+ getCurrentDirname(), TheCU);
+
+ DIFileCache[fname] = F;
+ return F;
+
+}
+
+/// getLineNumber - Get line number for the location. If location is invalid
+/// then use current location.
+unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
+ assert (CurLoc.isValid() && "Invalid current location!");
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
+ return PLoc.getLine();
+}
+
+/// getColumnNumber - Get column number for the location. If location is
+/// invalid then use current location.
+unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc) {
+ assert (CurLoc.isValid() && "Invalid current location!");
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
+ return PLoc.getColumn();
+}
+
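+/// getCurrentDirname - Return the current working directory, computed once
+/// and cached in CWDName so it can be reused as the compile unit directory.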
+llvm::StringRef CGDebugInfo::getCurrentDirname() {
+ if (!CWDName.empty())
+ return CWDName;
+ char *CompDirnamePtr = NULL;
+ llvm::sys::Path CWD = llvm::sys::Path::GetCurrentDirectory();
+ CompDirnamePtr = DebugInfoNames.Allocate<char>(CWD.size());
+ memcpy(CompDirnamePtr, CWD.c_str(), CWD.size());
+ return CWDName = llvm::StringRef(CompDirnamePtr, CWD.size());
+}
+
+/// CreateCompileUnit - Create new compile unit.
+void CGDebugInfo::CreateCompileUnit() {
+
+ // Get absolute path name.
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ std::string MainFileName = CGM.getCodeGenOpts().MainFileName;
+ if (MainFileName.empty())
+ MainFileName = "<unknown>";
+
+ // The main file name provided via the "-main-file-name" option contains just
+ // the file name itself with no path information. This file name may have had
+ // a relative path, so we look into the actual file entry for the main
+ // file to determine the real absolute path for the file.
+ std::string MainFileDir;
+ if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
+ MainFileDir = MainFile->getDir()->getName();
+ if (MainFileDir != ".")
+ MainFileName = MainFileDir + "/" + MainFileName;
+ }
+
+ // Save filename string.
+ char *FilenamePtr = DebugInfoNames.Allocate<char>(MainFileName.length());
+ memcpy(FilenamePtr, MainFileName.c_str(), MainFileName.length());
+ llvm::StringRef Filename(FilenamePtr, MainFileName.length());
+
+ unsigned LangTag;
+ const LangOptions &LO = CGM.getLangOptions();
+ if (LO.CPlusPlus) {
+ if (LO.ObjC1)
+ LangTag = llvm::dwarf::DW_LANG_ObjC_plus_plus;
+ else
+ LangTag = llvm::dwarf::DW_LANG_C_plus_plus;
+ } else if (LO.ObjC1) {
+ LangTag = llvm::dwarf::DW_LANG_ObjC;
+ } else if (LO.C99) {
+ LangTag = llvm::dwarf::DW_LANG_C99;
+ } else {
+ LangTag = llvm::dwarf::DW_LANG_C89;
+ }
+
+ std::string Producer = getClangFullVersion();
+
+ // Figure out which version of the ObjC runtime we have.
+ unsigned RuntimeVers = 0;
+ if (LO.ObjC1)
+ RuntimeVers = LO.ObjCNonFragileABI ? 2 : 1;
+
+ // Create new compile unit.
+ TheCU = DebugFactory.CreateCompileUnit(
+ LangTag, Filename, getCurrentDirname(),
+ Producer, true,
+ LO.Optimize, CGM.getCodeGenOpts().DwarfDebugFlags, RuntimeVers);
+}
+
+/// CreateType - Get the Basic type from the cache or create a new
+/// one if necessary.
+llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT,
+ llvm::DIFile Unit) {
+ unsigned Encoding = 0;
+ const char *BTName = NULL;
+ switch (BT->getKind()) {
+ default:
+ case BuiltinType::Void:
+ return llvm::DIType();
+ case BuiltinType::ObjCClass:
+ return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_structure_type,
+ Unit, "objc_class", Unit, 0, 0, 0, 0,
+ llvm::DIType::FlagFwdDecl,
+ llvm::DIType(), llvm::DIArray());
+ case BuiltinType::ObjCId: {
+ // typedef struct objc_class *Class;
+ // typedef struct objc_object {
+ // Class isa;
+ // } *id;
+
+ llvm::DIType OCTy =
+ DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_structure_type,
+ Unit, "objc_class", Unit, 0, 0, 0, 0,
+ llvm::DIType::FlagFwdDecl,
+ llvm::DIType(), llvm::DIArray());
+ unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
+
+ llvm::DIType ISATy =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
+ Unit, "", Unit,
+ 0, Size, 0, 0, 0, OCTy);
+
+ llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+
+ llvm::DIType FieldTy =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "isa", Unit,
+ 0,Size, 0, 0, 0, ISATy);
+ EltTys.push_back(FieldTy);
+ llvm::DIArray Elements =
+ DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_structure_type,
+ Unit, "objc_object", Unit, 0, 0, 0, 0,
+ 0,
+ llvm::DIType(), Elements);
+ }
+ case BuiltinType::UChar:
+ case BuiltinType::Char_U: Encoding = llvm::dwarf::DW_ATE_unsigned_char; break;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar: Encoding = llvm::dwarf::DW_ATE_signed_char; break;
+ case BuiltinType::UShort:
+ case BuiltinType::UInt:
+ case BuiltinType::ULong:
+ case BuiltinType::ULongLong: Encoding = llvm::dwarf::DW_ATE_unsigned; break;
+ case BuiltinType::Short:
+ case BuiltinType::Int:
+ case BuiltinType::Long:
+ case BuiltinType::LongLong: Encoding = llvm::dwarf::DW_ATE_signed; break;
+ case BuiltinType::Bool: Encoding = llvm::dwarf::DW_ATE_boolean; break;
+ case BuiltinType::Float:
+ case BuiltinType::LongDouble:
+ case BuiltinType::Double: Encoding = llvm::dwarf::DW_ATE_float; break;
+ }
+
+ switch (BT->getKind()) {
+ case BuiltinType::Long: BTName = "long int"; break;
+ case BuiltinType::LongLong: BTName = "long long int"; break;
+ case BuiltinType::ULong: BTName = "long unsigned int"; break;
+ case BuiltinType::ULongLong: BTName = "long long unsigned int"; break;
+ default:
+ BTName = BT->getName(CGM.getContext().getLangOptions());
+ break;
+ }
+ // Bit size, align and offset of the type.
+ uint64_t Size = CGM.getContext().getTypeSize(BT);
+ uint64_t Align = CGM.getContext().getTypeAlign(BT);
+ uint64_t Offset = 0;
+
+ llvm::DIType DbgTy =
+ DebugFactory.CreateBasicType(Unit, BTName,
+ Unit, 0, Size, Align,
+ Offset, /*flags*/ 0, Encoding);
+ return DbgTy;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const ComplexType *Ty,
+ llvm::DIFile Unit) {
+ // Bit size, align and offset of the type.
+ unsigned Encoding = llvm::dwarf::DW_ATE_complex_float;
+ if (Ty->isComplexIntegerType())
+ Encoding = llvm::dwarf::DW_ATE_lo_user;
+
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+ uint64_t Offset = 0;
+
+ llvm::DIType DbgTy =
+ DebugFactory.CreateBasicType(Unit, "complex",
+ Unit, 0, Size, Align,
+ Offset, /*flags*/ 0, Encoding);
+ return DbgTy;
+}
+
+/// CreateQualifiedType - Get the CVR-qualified type from the cache or create
+/// a new one if necessary.
+llvm::DIType CGDebugInfo::CreateQualifiedType(QualType Ty, llvm::DIFile Unit) {
+ QualifierCollector Qc;
+ const Type *T = Qc.strip(Ty);
+
+ // Ignore these qualifiers for now.
+ Qc.removeObjCGCAttr();
+ Qc.removeAddressSpace();
+
+  // We will create one derived type per qualifier and recurse to handle any
+  // additional ones.
+ unsigned Tag;
+ if (Qc.hasConst()) {
+ Tag = llvm::dwarf::DW_TAG_const_type;
+ Qc.removeConst();
+ } else if (Qc.hasVolatile()) {
+ Tag = llvm::dwarf::DW_TAG_volatile_type;
+ Qc.removeVolatile();
+ } else if (Qc.hasRestrict()) {
+ Tag = llvm::dwarf::DW_TAG_restrict_type;
+ Qc.removeRestrict();
+ } else {
+ assert(Qc.empty() && "Unknown type qualifier for debug info");
+ return getOrCreateType(QualType(T, 0), Unit);
+ }
+
+ llvm::DIType FromTy = getOrCreateType(Qc.apply(T), Unit);
+
+ // No need to fill in the Name, Line, Size, Alignment, Offset in case of
+ // CVR derived types.
+ llvm::DIType DbgTy =
+ DebugFactory.CreateDerivedType(Tag, Unit, "", Unit,
+ 0, 0, 0, 0, 0, FromTy);
+ return DbgTy;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const ObjCObjectPointerType *Ty,
+ llvm::DIFile Unit) {
+ llvm::DIType DbgTy =
+ CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
+ Ty->getPointeeType(), Unit);
+ return DbgTy;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty,
+ llvm::DIFile Unit) {
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
+ Ty->getPointeeType(), Unit);
+}
+
+llvm::DIType CGDebugInfo::CreatePointerLikeType(unsigned Tag,
+ const Type *Ty,
+ QualType PointeeTy,
+ llvm::DIFile Unit) {
+ llvm::DIType EltTy = getOrCreateType(PointeeTy, Unit);
+
+ // Bit size, align and offset of the type.
+
+ // Size is always the size of a pointer. We can't use getTypeSize here
+ // because that does not return the correct value for references.
+ uint64_t Size =
+ CGM.getContext().Target.getPointerWidth(PointeeTy.getAddressSpace());
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ return
+ DebugFactory.CreateDerivedType(Tag, Unit, "", Unit,
+ 0, Size, Align, 0, 0, EltTy);
+
+}
+
+llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
+ llvm::DIFile Unit) {
+ if (BlockLiteralGenericSet)
+ return BlockLiteralGeneric;
+
+ unsigned Tag = llvm::dwarf::DW_TAG_structure_type;
+
+ llvm::SmallVector<llvm::DIDescriptor, 5> EltTys;
+
+ llvm::DIType FieldTy;
+
+ QualType FType;
+ uint64_t FieldSize, FieldOffset;
+ unsigned FieldAlign;
+
+ llvm::DIArray Elements;
+ llvm::DIType EltTy, DescTy;
+
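+  // Build the __block_descriptor struct first; a pointer to it becomes the
+  // __descriptor field of the __block_literal_generic struct built below.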
+ FieldOffset = 0;
+ FType = CGM.getContext().UnsignedLongTy;
+ EltTys.push_back(CreateMemberType(Unit, FType, "reserved", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "Size", &FieldOffset));
+
+ Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+ EltTys.clear();
+
+ unsigned Flags = llvm::DIType::FlagAppleBlock;
+ unsigned LineNo = getLineNumber(CurLoc);
+
+ EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_descriptor",
+ Unit, LineNo, FieldOffset, 0, 0,
+ Flags, llvm::DIType(), Elements);
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ DescTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
+ Unit, "", Unit,
+ LineNo, Size, Align, 0, 0, EltTy);
+
+ FieldOffset = 0;
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ EltTys.push_back(CreateMemberType(Unit, FType, "__isa", &FieldOffset));
+ FType = CGM.getContext().IntTy;
+ EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__reserved", &FieldOffset));
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ EltTys.push_back(CreateMemberType(Unit, FType, "__FuncPtr", &FieldOffset));
+
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ FieldTy = DescTy;
+ FieldSize = CGM.getContext().getTypeSize(Ty);
+ FieldAlign = CGM.getContext().getTypeAlign(Ty);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__descriptor", Unit,
+ LineNo, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+
+ FieldOffset += FieldSize;
+ Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_literal_generic",
+ Unit, LineNo, FieldOffset, 0, 0,
+ Flags, llvm::DIType(), Elements);
+
+ BlockLiteralGenericSet = true;
+ BlockLiteralGeneric
+ = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type, Unit,
+ "", Unit,
+ LineNo, Size, Align, 0, 0, EltTy);
+ return BlockLiteralGeneric;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty,
+ llvm::DIFile Unit) {
+ // Typedefs are derived from some other type. If we have a typedef of a
+ // typedef, make sure to emit the whole chain.
+ llvm::DIType Src = getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit);
+
+ // We don't set size information, but do specify where the typedef was
+ // declared.
+ unsigned Line = getLineNumber(Ty->getDecl()->getLocation());
+
+ llvm::DIDescriptor TyContext
+ = getContextDescriptor(dyn_cast<Decl>(Ty->getDecl()->getDeclContext()),
+ Unit);
+ llvm::DIType DbgTy =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_typedef,
+ TyContext,
+ Ty->getDecl()->getName(), Unit,
+ Line, 0, 0, 0, 0, Src);
+ return DbgTy;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
+ llvm::DIFile Unit) {
+ llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+
+ // Add the result type at least.
+ EltTys.push_back(getOrCreateType(Ty->getResultType(), Unit));
+
+ // Set up remainder of arguments if there is a prototype.
+ // FIXME: IF NOT, HOW IS THIS REPRESENTED? llvm-gcc doesn't represent '...'!
+ if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(Ty)) {
+ for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+ EltTys.push_back(getOrCreateType(FTP->getArgType(i), Unit));
+ } else {
+ // FIXME: Handle () case in C. llvm-gcc doesn't do it either.
+ }
+
+ llvm::DIArray EltTypeArray =
+ DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ llvm::DIType DbgTy =
+ DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
+ Unit, "", Unit,
+ 0, 0, 0, 0, 0,
+ llvm::DIType(), EltTypeArray);
+ return DbgTy;
+}
+
+/// CollectRecordFields - A helper function to collect debug info for
+/// record fields. This is used while creating a debug info entry for a Record.
+void CGDebugInfo::
+CollectRecordFields(const RecordDecl *RD, llvm::DIFile Unit,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys) {
+ unsigned FieldNo = 0;
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end();
+ I != E; ++I, ++FieldNo) {
+ FieldDecl *Field = *I;
+ llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
+ llvm::StringRef FieldName = Field->getName();
+
+ // Ignore unnamed fields. Do not ignore unnamed records.
+ if (FieldName.empty() && !isa<RecordType>(Field->getType()))
+ continue;
+
+ // Get the location for the field.
+ llvm::DIFile FieldDefUnit = getOrCreateFile(Field->getLocation());
+ unsigned FieldLine = getLineNumber(Field->getLocation());
+ QualType FType = Field->getType();
+ uint64_t FieldSize = 0;
+ unsigned FieldAlign = 0;
+ if (!FType->isIncompleteArrayType()) {
+
+ // Bit size, align and offset of the type.
+ FieldSize = CGM.getContext().getTypeSize(FType);
+ Expr *BitWidth = Field->getBitWidth();
+ if (BitWidth)
+ FieldSize = BitWidth->EvaluateAsInt(CGM.getContext()).getZExtValue();
+ FieldAlign = CGM.getContext().getTypeAlign(FType);
+ }
+
+ uint64_t FieldOffset = RL.getFieldOffset(FieldNo);
+
+ unsigned Flags = 0;
+ AccessSpecifier Access = I->getAccess();
+ if (Access == clang::AS_private)
+ Flags |= llvm::DIType::FlagPrivate;
+ else if (Access == clang::AS_protected)
+ Flags |= llvm::DIType::FlagProtected;
+
+ // Create a DW_TAG_member node to remember the offset of this field in the
+ // struct. FIXME: This is an absolutely insane way to capture this
+ // information. When we gut debug info, this should be fixed.
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ FieldName, FieldDefUnit,
+ FieldLine, FieldSize, FieldAlign,
+ FieldOffset, Flags, FieldTy);
+ EltTys.push_back(FieldTy);
+ }
+}
+
+/// getOrCreateMethodType - CXXMethodDecl's type is a FunctionType. This
+/// function type is not updated to include the implicit "this" pointer. Use
+/// this routine to get a method type which includes the "this" pointer.
+llvm::DIType
+CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
+ llvm::DIFile Unit) {
+ llvm::DIType FnTy
+ = getOrCreateType(QualType(Method->getType()->getAs<FunctionProtoType>(),
+ 0),
+ Unit);
+
+ unsigned BFlags=0;
+ AccessSpecifier Access = Method->getAccess();
+ if (Access == clang::AS_private)
+ BFlags |= llvm::DIType::FlagPrivate;
+ else if (Access == clang::AS_protected)
+ BFlags |= llvm::DIType::FlagProtected;
+
+ // Add "this" pointer.
+
+ llvm::DIArray Args = llvm::DICompositeType(FnTy).getTypeArray();
+ assert (Args.getNumElements() && "Invalid number of arguments!");
+
+ llvm::SmallVector<llvm::DIDescriptor, 16> Elts;
+
+ // First element is always return type. For 'void' functions it is NULL.
+ Elts.push_back(Args.getElement(0));
+
+ if (!Method->isStatic())
+ {
+ // "this" pointer is always first argument.
+ ASTContext &Context = CGM.getContext();
+ QualType ThisPtr =
+ Context.getPointerType(Context.getTagDeclType(Method->getParent()));
+ llvm::DIType ThisPtrType =
+ DebugFactory.CreateArtificialType(getOrCreateType(ThisPtr, Unit));
+
+ TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
+ Elts.push_back(ThisPtrType);
+ }
+
+ // Copy rest of the arguments.
+ for (unsigned i = 1, e = Args.getNumElements(); i != e; ++i)
+ Elts.push_back(Args.getElement(i));
+
+ llvm::DIArray EltTypeArray =
+ DebugFactory.GetOrCreateArray(Elts.data(), Elts.size());
+
+ return
+ DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
+ Unit, "", Unit,
+ 0, 0, 0, 0, 0,
+ llvm::DIType(), EltTypeArray);
+}
+
+/// CreateCXXMemberFunction - A helper function to create a DISubprogram for
+/// a single member function GlobalDecl.
+llvm::DISubprogram
+CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
+ llvm::DIFile Unit,
+ llvm::DIType RecordTy) {
+ bool IsCtorOrDtor =
+ isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method);
+
+ llvm::StringRef MethodName = getFunctionName(Method);
+ llvm::DIType MethodTy = getOrCreateMethodType(Method, Unit);
+
+ // Since a single ctor/dtor corresponds to multiple functions, it doesn't
+ // make sense to give a single ctor/dtor a linkage name.
+ llvm::StringRef MethodLinkageName;
+ if (!IsCtorOrDtor)
+ MethodLinkageName = CGM.getMangledName(Method);
+
+ // Get the location for the method.
+ llvm::DIFile MethodDefUnit = getOrCreateFile(Method->getLocation());
+ unsigned MethodLine = getLineNumber(Method->getLocation());
+
+ // Collect virtual method info.
+ llvm::DIType ContainingType;
+ unsigned Virtuality = 0;
+ unsigned VIndex = 0;
+
+ if (Method->isVirtual()) {
+ if (Method->isPure())
+ Virtuality = llvm::dwarf::DW_VIRTUALITY_pure_virtual;
+ else
+ Virtuality = llvm::dwarf::DW_VIRTUALITY_virtual;
+
+ // It doesn't make sense to give a virtual destructor a vtable index,
+ // since a single destructor has two entries in the vtable.
+ if (!isa<CXXDestructorDecl>(Method))
+ VIndex = CGM.getVTables().getMethodVTableIndex(Method);
+ ContainingType = RecordTy;
+ }
+
+ llvm::DISubprogram SP =
+ DebugFactory.CreateSubprogram(RecordTy , MethodName, MethodName,
+ MethodLinkageName,
+ MethodDefUnit, MethodLine,
+ MethodTy, /*isLocalToUnit=*/false,
+                                  /*isDefinition=*/ false,
+ Virtuality, VIndex, ContainingType,
+ Method->isImplicit(),
+ CGM.getLangOptions().Optimize);
+
+ // Don't cache ctors or dtors since we have to emit multiple functions for
+ // a single ctor or dtor.
+ if (!IsCtorOrDtor && Method->isThisDeclarationADefinition())
+ SPCache[Method] = llvm::WeakVH(SP);
+
+ return SP;
+}
+
+/// CollectCXXMemberFunctions - A helper function to collect debug info for
+/// C++ member functions. This is used while creating a debug info entry for
+/// a Record.
+void CGDebugInfo::
+CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+ llvm::DIType RecordTy) {
+ for(CXXRecordDecl::method_iterator I = RD->method_begin(),
+ E = RD->method_end(); I != E; ++I) {
+ const CXXMethodDecl *Method = *I;
+
+ if (Method->isImplicit() && !Method->isUsed())
+ continue;
+
+ EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
+ }
+}
+
+/// CollectCXXFriends - A helper function to collect debug info for
+/// C++ friend declarations. This is used while creating a debug info entry for
+/// a Record.
+void CGDebugInfo::
+CollectCXXFriends(const CXXRecordDecl *RD, llvm::DIFile Unit,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+ llvm::DIType RecordTy) {
+
+ for (CXXRecordDecl::friend_iterator BI = RD->friend_begin(),
+ BE = RD->friend_end(); BI != BE; ++BI) {
+
+ TypeSourceInfo *TInfo = (*BI)->getFriendType();
+ if(TInfo)
+ {
+ llvm::DIType Ty = getOrCreateType(TInfo->getType(), Unit);
+
+ llvm::DIType DTy =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_friend,
+ RecordTy, llvm::StringRef(),
+ Unit, 0, 0, 0,
+ 0, 0, Ty);
+
+ EltTys.push_back(DTy);
+ }
+
+ }
+}
+
+/// CollectCXXBases - A helper function to collect debug info for
+/// C++ base classes. This is used while creating a debug info entry for
+/// a Record.
+void CGDebugInfo::
+CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+ llvm::DIType RecordTy) {
+
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ for (CXXRecordDecl::base_class_const_iterator BI = RD->bases_begin(),
+ BE = RD->bases_end(); BI != BE; ++BI) {
+ unsigned BFlags = 0;
+ uint64_t BaseOffset;
+
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(BI->getType()->getAs<RecordType>()->getDecl());
+
+ if (BI->isVirtual()) {
+      // The virtual base offset offset is negative. The code generator emits a
+      // DWARF expression where it expects a positive number.
+ BaseOffset = 0 - CGM.getVTables().getVirtualBaseOffsetOffset(RD, Base);
+ BFlags = llvm::DIType::FlagVirtual;
+ } else
+ BaseOffset = RL.getBaseClassOffset(Base);
+
+ AccessSpecifier Access = BI->getAccessSpecifier();
+ if (Access == clang::AS_private)
+ BFlags |= llvm::DIType::FlagPrivate;
+ else if (Access == clang::AS_protected)
+ BFlags |= llvm::DIType::FlagProtected;
+
+ llvm::DIType DTy =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_inheritance,
+ RecordTy, llvm::StringRef(),
+ Unit, 0, 0, 0,
+ BaseOffset, BFlags,
+ getOrCreateType(BI->getType(),
+ Unit));
+ EltTys.push_back(DTy);
+ }
+}
+
+/// getOrCreateVTablePtrType - Return debug info descriptor for vtable.
+llvm::DIType CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile Unit) {
+ if (VTablePtrType.isValid())
+ return VTablePtrType;
+
+ ASTContext &Context = CGM.getContext();
+
+ /* Function type */
+ llvm::DIDescriptor STy = getOrCreateType(Context.IntTy, Unit);
+ llvm::DIArray SElements = DebugFactory.GetOrCreateArray(&STy, 1);
+ llvm::DIType SubTy =
+ DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
+ Unit, "", Unit,
+ 0, 0, 0, 0, 0, llvm::DIType(), SElements);
+
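+  // A vtable entry is modeled as a pointer to this subroutine type; the
+  // vtable pointer itself is then a pointer to that __vtbl_ptr_type.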
+ unsigned Size = Context.getTypeSize(Context.VoidPtrTy);
+ llvm::DIType vtbl_ptr_type
+ = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
+ Unit, "__vtbl_ptr_type", Unit,
+ 0, Size, 0, 0, 0, SubTy);
+
+ VTablePtrType =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
+ Unit, "", Unit,
+ 0, Size, 0, 0, 0, vtbl_ptr_type);
+ return VTablePtrType;
+}
+
+/// getVTableName - Get the vtable name for the given class.
+llvm::StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) {
+  // Construct a gdb-compatible name.
+ std::string Name = "_vptr$" + RD->getNameAsString();
+
+ // Copy this name on the side and use its reference.
+ char *StrPtr = DebugInfoNames.Allocate<char>(Name.length());
+ memcpy(StrPtr, Name.data(), Name.length());
+ return llvm::StringRef(StrPtr, Name.length());
+}
+
+
+/// CollectVTableInfo - If the C++ class has a vtable then insert an
+/// appropriate debug info entry into the EltTys vector.
+void CGDebugInfo::
+CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys) {
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+
+ // If there is a primary base then it will hold vtable info.
+ if (RL.getPrimaryBase())
+ return;
+
+  // If this class is not dynamic then there is no vtable info to collect.
+ if (!RD->isDynamicClass())
+ return;
+
+ unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
+ llvm::DIType VPTR
+ = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ getVTableName(RD), Unit,
+ 0, Size, 0, 0, 0,
+ getOrCreateVTablePtrType(Unit));
+ EltTys.push_back(VPTR);
+}
+
+/// CreateType - get structure, union, or class type.
+llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
+ llvm::DIFile Unit) {
+ RecordDecl *RD = Ty->getDecl();
+
+ unsigned Tag;
+ if (RD->isStruct())
+ Tag = llvm::dwarf::DW_TAG_structure_type;
+ else if (RD->isUnion())
+ Tag = llvm::dwarf::DW_TAG_union_type;
+ else {
+ assert(RD->isClass() && "Unknown RecordType!");
+ Tag = llvm::dwarf::DW_TAG_class_type;
+ }
+
+ // Get overall information about the record type for the debug info.
+ llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
+ unsigned Line = getLineNumber(RD->getLocation());
+
+ // Records and classes and unions can all be recursive. To handle them, we
+ // first generate a debug descriptor for the struct as a forward declaration.
+ // Then (if it is a definition) we go through and get debug info for all of
+ // its members. Finally, we create a descriptor for the complete type (which
+ // may refer to the forward decl if the struct is recursive) and replace all
+ // uses of the forward declaration with the final definition.
+ llvm::DIDescriptor FDContext =
+ getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()), Unit);
+
+ // If this is just a forward declaration, construct an appropriately
+ // marked node and just return it.
+ if (!RD->getDefinition()) {
+ llvm::DICompositeType FwdDecl =
+ DebugFactory.CreateCompositeType(Tag, FDContext, RD->getName(),
+ DefUnit, Line, 0, 0, 0,
+ llvm::DIType::FlagFwdDecl,
+ llvm::DIType(), llvm::DIArray());
+
+ return FwdDecl;
+ }
+
+ llvm::DIType FwdDecl = DebugFactory.CreateTemporaryType();
+
+ llvm::MDNode *MN = FwdDecl;
+ llvm::TrackingVH<llvm::MDNode> FwdDeclNode = MN;
+ // Otherwise, insert it into the TypeCache so that recursive uses will find
+ // it.
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
+ // Push the struct on region stack.
+ RegionStack.push_back(FwdDeclNode);
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
+
+ // Convert all the elements.
+ llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+
+ const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
+ if (CXXDecl) {
+ CollectCXXBases(CXXDecl, Unit, EltTys, FwdDecl);
+ CollectVTableInfo(CXXDecl, Unit, EltTys);
+ }
+
+ // Collect static variables with initializers.
+ for (RecordDecl::decl_iterator I = RD->decls_begin(), E = RD->decls_end();
+ I != E; ++I)
+ if (const VarDecl *V = dyn_cast<VarDecl>(*I)) {
+ if (const Expr *Init = V->getInit()) {
+ Expr::EvalResult Result;
+ if (Init->Evaluate(Result, CGM.getContext()) && Result.Val.isInt()) {
+ llvm::ConstantInt *CI
+ = llvm::ConstantInt::get(CGM.getLLVMContext(), Result.Val.getInt());
+
+ // Create the descriptor for static variable.
+ llvm::DIFile VUnit = getOrCreateFile(V->getLocation());
+ llvm::StringRef VName = V->getName();
+ llvm::DIType VTy = getOrCreateType(V->getType(), VUnit);
+ // Do not use DIGlobalVariable for enums.
+ if (VTy.getTag() != llvm::dwarf::DW_TAG_enumeration_type) {
+ DebugFactory.CreateGlobalVariable(FwdDecl, VName, VName, VName, VUnit,
+ getLineNumber(V->getLocation()),
+ VTy, true, true, CI);
+ }
+ }
+ }
+ }
+
+ CollectRecordFields(RD, Unit, EltTys);
+ llvm::MDNode *ContainingType = NULL;
+ if (CXXDecl) {
+ CollectCXXMemberFunctions(CXXDecl, Unit, EltTys, FwdDecl);
+ CollectCXXFriends(CXXDecl, Unit, EltTys, FwdDecl);
+
+ // A class's primary base or the class itself contains the vtable.
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ if (const CXXRecordDecl *PBase = RL.getPrimaryBase())
+ ContainingType =
+ getOrCreateType(QualType(PBase->getTypeForDecl(), 0), Unit);
+ else if (CXXDecl->isDynamicClass())
+ ContainingType = FwdDecl;
+ }
+
+ llvm::DIArray Elements =
+ DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ RegionStack.pop_back();
+ llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator RI =
+ RegionMap.find(Ty->getDecl());
+ if (RI != RegionMap.end())
+ RegionMap.erase(RI);
+
+ llvm::DIDescriptor RDContext =
+ getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()), Unit);
+
+ llvm::StringRef RDName = RD->getName();
+ // If this is a class, include the template arguments also.
+ if (Tag == llvm::dwarf::DW_TAG_class_type)
+ RDName = getClassName(RD);
+
+ llvm::DICompositeType RealDecl =
+ DebugFactory.CreateCompositeType(Tag, RDContext,
+ RDName,
+ DefUnit, Line, Size, Align, 0, 0,
+ llvm::DIType(), Elements,
+ 0, ContainingType);
+
+ // Now that we have a real decl for the struct, replace anything using the
+ // old decl with the new one. This will recursively update the debug info.
+ llvm::DIType(FwdDeclNode).replaceAllUsesWith(RealDecl);
+ RegionMap[RD] = llvm::WeakVH(RealDecl);
+ return RealDecl;
+}
+
+/// CreateType - get objective-c object type.
+llvm::DIType CGDebugInfo::CreateType(const ObjCObjectType *Ty,
+ llvm::DIFile Unit) {
+ // Ignore protocols.
+ return getOrCreateType(Ty->getBaseType(), Unit);
+}
+
+/// CreateType - get objective-c interface type.
+llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
+ llvm::DIFile Unit) {
+ ObjCInterfaceDecl *ID = Ty->getDecl();
+ unsigned Tag = llvm::dwarf::DW_TAG_structure_type;
+
+ // Get overall information about the record type for the debug info.
+ llvm::DIFile DefUnit = getOrCreateFile(ID->getLocation());
+ unsigned Line = getLineNumber(ID->getLocation());
+ unsigned RuntimeLang = TheCU.getLanguage();
+
+ // If this is just a forward declaration, return a special forward-declaration
+ // debug type.
+ if (ID->isForwardDecl()) {
+ llvm::DICompositeType FwdDecl =
+ DebugFactory.CreateCompositeType(Tag, Unit, ID->getName(),
+ DefUnit, Line, 0, 0, 0, 0,
+ llvm::DIType(), llvm::DIArray(),
+ RuntimeLang);
+ return FwdDecl;
+ }
+
+  // To handle a recursive interface, we
+ // first generate a debug descriptor for the struct as a forward declaration.
+ // Then (if it is a definition) we go through and get debug info for all of
+ // its members. Finally, we create a descriptor for the complete type (which
+ // may refer to the forward decl if the struct is recursive) and replace all
+ // uses of the forward declaration with the final definition.
+ llvm::DIType FwdDecl = DebugFactory.CreateTemporaryType();
+
+ llvm::MDNode *MN = FwdDecl;
+ llvm::TrackingVH<llvm::MDNode> FwdDeclNode = MN;
+ // Otherwise, insert it into the TypeCache so that recursive uses will find
+ // it.
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
+ // Push the struct on region stack.
+ RegionStack.push_back(FwdDeclNode);
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
+
+ // Convert all the elements.
+ llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+
+ ObjCInterfaceDecl *SClass = ID->getSuperClass();
+ if (SClass) {
+ llvm::DIType SClassTy =
+ getOrCreateType(CGM.getContext().getObjCInterfaceType(SClass), Unit);
+ llvm::DIType InhTag =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_inheritance,
+ Unit, "", Unit, 0, 0, 0,
+ 0 /* offset */, 0, SClassTy);
+ EltTys.push_back(InhTag);
+ }
+
+ const ASTRecordLayout &RL = CGM.getContext().getASTObjCInterfaceLayout(ID);
+
+ unsigned FieldNo = 0;
+ for (ObjCInterfaceDecl::ivar_iterator I = ID->ivar_begin(),
+ E = ID->ivar_end(); I != E; ++I, ++FieldNo) {
+ ObjCIvarDecl *Field = *I;
+ llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
+
+ llvm::StringRef FieldName = Field->getName();
+
+ // Ignore unnamed fields.
+ if (FieldName.empty())
+ continue;
+
+ // Get the location for the field.
+ llvm::DIFile FieldDefUnit = getOrCreateFile(Field->getLocation());
+ unsigned FieldLine = getLineNumber(Field->getLocation());
+ QualType FType = Field->getType();
+ uint64_t FieldSize = 0;
+ unsigned FieldAlign = 0;
+
+ if (!FType->isIncompleteArrayType()) {
+
+ // Bit size, align and offset of the type.
+ FieldSize = CGM.getContext().getTypeSize(FType);
+ Expr *BitWidth = Field->getBitWidth();
+ if (BitWidth)
+ FieldSize = BitWidth->EvaluateAsInt(CGM.getContext()).getZExtValue();
+
+ FieldAlign = CGM.getContext().getTypeAlign(FType);
+ }
+
+ uint64_t FieldOffset = RL.getFieldOffset(FieldNo);
+
+ unsigned Flags = 0;
+ if (Field->getAccessControl() == ObjCIvarDecl::Protected)
+ Flags = llvm::DIType::FlagProtected;
+ else if (Field->getAccessControl() == ObjCIvarDecl::Private)
+ Flags = llvm::DIType::FlagPrivate;
+
+ // Create a DW_TAG_member node to remember the offset of this field in the
+ // struct. FIXME: This is an absolutely insane way to capture this
+ // information. When we gut debug info, this should be fixed.
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ FieldName, FieldDefUnit,
+ FieldLine, FieldSize, FieldAlign,
+ FieldOffset, Flags, FieldTy);
+ EltTys.push_back(FieldTy);
+ }
+
+ llvm::DIArray Elements =
+ DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ RegionStack.pop_back();
+ llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator RI =
+ RegionMap.find(Ty->getDecl());
+ if (RI != RegionMap.end())
+ RegionMap.erase(RI);
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ llvm::DICompositeType RealDecl =
+ DebugFactory.CreateCompositeType(Tag, Unit, ID->getName(), DefUnit,
+ Line, Size, Align, 0, 0, llvm::DIType(),
+ Elements, RuntimeLang);
+
+ // Now that we have a real decl for the struct, replace anything using the
+ // old decl with the new one. This will recursively update the debug info.
+ llvm::DIType(FwdDeclNode).replaceAllUsesWith(RealDecl);
+ RegionMap[ID] = llvm::WeakVH(RealDecl);
+
+ return RealDecl;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const EnumType *Ty,
+ llvm::DIFile Unit) {
+ return CreateEnumType(Ty->getDecl(), Unit);
+
+}
+
+llvm::DIType CGDebugInfo::CreateType(const TagType *Ty,
+ llvm::DIFile Unit) {
+ if (const RecordType *RT = dyn_cast<RecordType>(Ty))
+ return CreateType(RT, Unit);
+ else if (const EnumType *ET = dyn_cast<EnumType>(Ty))
+ return CreateType(ET, Unit);
+
+ return llvm::DIType();
+}
+
+llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty,
+ llvm::DIFile Unit) {
+ llvm::DIType ElementTy = getOrCreateType(Ty->getElementType(), Unit);
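+  // The subrange upper bound is inclusive, so a vector of N elements is
+  // described by the range [0, N-1].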
+ uint64_t NumElems = Ty->getNumElements();
+ if (NumElems > 0)
+ --NumElems;
+
+ llvm::DIDescriptor Subscript = DebugFactory.GetOrCreateSubrange(0, NumElems);
+ llvm::DIArray SubscriptArray = DebugFactory.GetOrCreateArray(&Subscript, 1);
+
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ return
+ DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_vector_type,
+ Unit, "", Unit,
+ 0, Size, Align, 0, 0,
+ ElementTy, SubscriptArray);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
+ llvm::DIFile Unit) {
+ uint64_t Size;
+ uint64_t Align;
+
+
+ // FIXME: make getTypeAlign() aware of VLAs and incomplete array types
+ if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(Ty)) {
+ Size = 0;
+ Align =
+ CGM.getContext().getTypeAlign(CGM.getContext().getBaseElementType(VAT));
+ } else if (Ty->isIncompleteArrayType()) {
+ Size = 0;
+ Align = CGM.getContext().getTypeAlign(Ty->getElementType());
+ } else {
+ // Size and align of the whole array, not the element type.
+ Size = CGM.getContext().getTypeSize(Ty);
+ Align = CGM.getContext().getTypeAlign(Ty);
+ }
+
+ // Add the dimensions of the array. FIXME: This loses CV qualifiers from
+ // interior arrays, do we care? Why aren't nested arrays represented the
+ // obvious/recursive way?
+ llvm::SmallVector<llvm::DIDescriptor, 8> Subscripts;
+ QualType EltTy(Ty, 0);
+ while ((Ty = dyn_cast<ArrayType>(EltTy))) {
+ uint64_t Upper = 0;
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
+ if (CAT->getSize().getZExtValue())
+ Upper = CAT->getSize().getZExtValue() - 1;
+ // FIXME: Verify this is right for VLAs.
+ Subscripts.push_back(DebugFactory.GetOrCreateSubrange(0, Upper));
+ EltTy = Ty->getElementType();
+ }
+
+ llvm::DIArray SubscriptArray =
+ DebugFactory.GetOrCreateArray(Subscripts.data(), Subscripts.size());
+
+ llvm::DIType DbgTy =
+ DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_array_type,
+ Unit, "", Unit,
+ 0, Size, Align, 0, 0,
+ getOrCreateType(EltTy, Unit),
+ SubscriptArray);
+ return DbgTy;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const LValueReferenceType *Ty,
+ llvm::DIFile Unit) {
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_reference_type,
+ Ty, Ty->getPointeeType(), Unit);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
+ llvm::DIFile U) {
+ QualType PointerDiffTy = CGM.getContext().getPointerDiffType();
+ llvm::DIType PointerDiffDITy = getOrCreateType(PointerDiffTy, U);
+
+ if (!Ty->getPointeeType()->isFunctionType()) {
+ // We have a data member pointer type.
+ return PointerDiffDITy;
+ }
+
+ // We have a member function pointer type. Treat it as a struct with two
+ // ptrdiff_t members.
+ std::pair<uint64_t, unsigned> Info = CGM.getContext().getTypeInfo(Ty);
+
+ uint64_t FieldOffset = 0;
+ llvm::DIDescriptor ElementTypes[2];
+
+ // FIXME: This should probably be a function type instead.
+ ElementTypes[0] =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, U,
+ "ptr", U, 0,
+ Info.first, Info.second, FieldOffset, 0,
+ PointerDiffDITy);
+ FieldOffset += Info.first;
+
+ ElementTypes[1] =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, U,
+ "ptr", U, 0,
+ Info.first, Info.second, FieldOffset, 0,
+ PointerDiffDITy);
+
+ llvm::DIArray Elements =
+ DebugFactory.GetOrCreateArray(&ElementTypes[0],
+ llvm::array_lengthof(ElementTypes));
+
+ return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_structure_type,
+ U, llvm::StringRef("test"),
+ U, 0, FieldOffset,
+ 0, 0, 0, llvm::DIType(), Elements);
+}
+
+/// CreateEnumType - get enumeration type.
+llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED, llvm::DIFile Unit){
+ llvm::SmallVector<llvm::DIDescriptor, 32> Enumerators;
+
+ // Create DIEnumerator elements for each enumerator.
+ for (EnumDecl::enumerator_iterator
+ Enum = ED->enumerator_begin(), EnumEnd = ED->enumerator_end();
+ Enum != EnumEnd; ++Enum) {
+ Enumerators.push_back(DebugFactory.CreateEnumerator(Enum->getName(),
+ Enum->getInitVal().getZExtValue()));
+ }
+
+ // Return a CompositeType for the enum itself.
+ llvm::DIArray EltArray =
+ DebugFactory.GetOrCreateArray(Enumerators.data(), Enumerators.size());
+
+ llvm::DIFile DefUnit = getOrCreateFile(ED->getLocation());
+ unsigned Line = getLineNumber(ED->getLocation());
+ uint64_t Size = 0;
+ uint64_t Align = 0;
+ if (!ED->getTypeForDecl()->isIncompleteType()) {
+ Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
+ Align = CGM.getContext().getTypeAlign(ED->getTypeForDecl());
+ }
+ llvm::DIType DbgTy =
+ DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_enumeration_type,
+ Unit, ED->getName(), DefUnit, Line,
+ Size, Align, 0, 0,
+ llvm::DIType(), EltArray);
+ return DbgTy;
+}
+
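+/// UnwrapTypeForDebugInfo - Strip type sugar (typeof, decltype, elaborated
+/// types, template specializations and substituted template parameters) so
+/// that debug info is built for the underlying type.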
+static QualType UnwrapTypeForDebugInfo(QualType T) {
+ do {
+ QualType LastT = T;
+ switch (T->getTypeClass()) {
+ default:
+ return T;
+ case Type::TemplateSpecialization:
+ T = cast<TemplateSpecializationType>(T)->desugar();
+ break;
+ case Type::TypeOfExpr: {
+ TypeOfExprType *Ty = cast<TypeOfExprType>(T);
+ T = Ty->getUnderlyingExpr()->getType();
+ break;
+ }
+ case Type::TypeOf:
+ T = cast<TypeOfType>(T)->getUnderlyingType();
+ break;
+ case Type::Decltype:
+ T = cast<DecltypeType>(T)->getUnderlyingType();
+ break;
+ case Type::Elaborated:
+ T = cast<ElaboratedType>(T)->getNamedType();
+ break;
+ case Type::SubstTemplateTypeParm:
+ T = cast<SubstTemplateTypeParmType>(T)->getReplacementType();
+ break;
+ }
+
+ assert(T != LastT && "Type unwrapping failed to unwrap!");
+ if (T == LastT)
+ return T;
+ } while (true);
+
+ return T;
+}
+
+/// getOrCreateType - Get the type from the cache or create a new
+/// one if necessary.
+llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty,
+ llvm::DIFile Unit) {
+ if (Ty.isNull())
+ return llvm::DIType();
+
+ // Unwrap the type as needed for debug information.
+ Ty = UnwrapTypeForDebugInfo(Ty);
+
+ // Check for existing entry.
+ llvm::DenseMap<void *, llvm::WeakVH>::iterator it =
+ TypeCache.find(Ty.getAsOpaquePtr());
+ if (it != TypeCache.end()) {
+ // Verify that the debug info still exists.
+ if (&*it->second)
+ return llvm::DIType(cast<llvm::MDNode>(it->second));
+ }
+
+ // Otherwise create the type.
+ llvm::DIType Res = CreateTypeNode(Ty, Unit);
+
+ // And update the type cache.
+ TypeCache[Ty.getAsOpaquePtr()] = Res;
+ return Res;
+}
+
+/// CreateTypeNode - Create a new debug type node.
+llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
+ llvm::DIFile Unit) {
+ // Handle qualifiers, which recursively handles what they refer to.
+ if (Ty.hasLocalQualifiers())
+ return CreateQualifiedType(Ty, Unit);
+
+ const char *Diag = 0;
+
+ // Work out details of type.
+ switch (Ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ assert(false && "Dependent types cannot show up in debug information");
+
+ // FIXME: Handle these.
+ case Type::ExtVector:
+ return llvm::DIType();
+
+ case Type::Vector:
+ return CreateType(cast<VectorType>(Ty), Unit);
+ case Type::ObjCObjectPointer:
+ return CreateType(cast<ObjCObjectPointerType>(Ty), Unit);
+ case Type::ObjCObject:
+ return CreateType(cast<ObjCObjectType>(Ty), Unit);
+ case Type::ObjCInterface:
+ return CreateType(cast<ObjCInterfaceType>(Ty), Unit);
+ case Type::Builtin: return CreateType(cast<BuiltinType>(Ty), Unit);
+ case Type::Complex: return CreateType(cast<ComplexType>(Ty), Unit);
+ case Type::Pointer: return CreateType(cast<PointerType>(Ty), Unit);
+ case Type::BlockPointer:
+ return CreateType(cast<BlockPointerType>(Ty), Unit);
+ case Type::Typedef: return CreateType(cast<TypedefType>(Ty), Unit);
+ case Type::Record:
+ case Type::Enum:
+ return CreateType(cast<TagType>(Ty), Unit);
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ return CreateType(cast<FunctionType>(Ty), Unit);
+ case Type::ConstantArray:
+ case Type::VariableArray:
+ case Type::IncompleteArray:
+ return CreateType(cast<ArrayType>(Ty), Unit);
+
+ case Type::LValueReference:
+ return CreateType(cast<LValueReferenceType>(Ty), Unit);
+
+ case Type::MemberPointer:
+ return CreateType(cast<MemberPointerType>(Ty), Unit);
+
+ case Type::TemplateSpecialization:
+ case Type::Elaborated:
+ case Type::SubstTemplateTypeParm:
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::Decltype:
+ llvm_unreachable("type should have been unwrapped!");
+ return llvm::DIType();
+
+ case Type::RValueReference:
+ // FIXME: Implement!
+ Diag = "rvalue references";
+ break;
+ }
+
+ assert(Diag && "Fall through without a diagnostic?");
+ unsigned DiagID = CGM.getDiags().getCustomDiagID(Diagnostic::Error,
+ "debug information for %0 is not yet supported");
+ CGM.getDiags().Report(FullSourceLoc(), DiagID)
+ << Diag;
+ return llvm::DIType();
+}
+
+/// CreateMemberType - Create new member and increase Offset by FType's size.
+llvm::DIType CGDebugInfo::CreateMemberType(llvm::DIFile Unit, QualType FType,
+ llvm::StringRef Name,
+ uint64_t *Offset) {
+ llvm::DIType FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ uint64_t FieldSize = CGM.getContext().getTypeSize(FType);
+ unsigned FieldAlign = CGM.getContext().getTypeAlign(FType);
+ llvm::DIType Ty = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member,
+ Unit, Name, Unit, 0,
+ FieldSize, FieldAlign,
+ *Offset, 0, FieldTy);
+ *Offset += FieldSize;
+ return Ty;
+}
+
+/// EmitFunctionStart - Constructs the debug code for entering a function -
+/// "llvm.dbg.func.start.".
+void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
+ llvm::Function *Fn,
+ CGBuilderTy &Builder) {
+
+ llvm::StringRef Name;
+ llvm::StringRef LinkageName;
+
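+  // Remember the current region stack depth so EmitFunctionEnd can pop any
+  // regions opened inside this function.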
+ FnBeginRegionCount.push_back(RegionStack.size());
+
+ const Decl *D = GD.getDecl();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // If there is a DISubprogram for this function available then use it.
+ llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
+ FI = SPCache.find(FD);
+ if (FI != SPCache.end()) {
+ llvm::DIDescriptor SP(dyn_cast_or_null<llvm::MDNode>(FI->second));
+ if (SP.isSubprogram() && llvm::DISubprogram(SP).isDefinition()) {
+ llvm::MDNode *SPN = SP;
+ RegionStack.push_back(SPN);
+ RegionMap[D] = llvm::WeakVH(SP);
+ return;
+ }
+ }
+ Name = getFunctionName(FD);
+    // Use the mangled name as the linkage name for C/C++ functions.
+ LinkageName = CGM.getMangledName(GD);
+ } else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D)) {
+ Name = getObjCMethodName(OMD);
+ LinkageName = Name;
+ } else {
+    // Use the LLVM function name as the linkage name.
+ Name = Fn->getName();
+ LinkageName = Name;
+ }
+ if (!Name.empty() && Name[0] == '\01')
+ Name = Name.substr(1);
+
+  // It is expected that CurLoc is set before using EmitFunctionStart.
+  // Usually, CurLoc points to the opening brace of the compound
+  // statement representing the function body.
+ llvm::DIFile Unit = getOrCreateFile(CurLoc);
+ unsigned LineNo = getLineNumber(CurLoc);
+
+ llvm::DISubprogram SP =
+ DebugFactory.CreateSubprogram(Unit, Name, Name, LinkageName, Unit, LineNo,
+ getOrCreateType(FnType, Unit),
+ Fn->hasInternalLinkage(), true/*definition*/,
+ 0, 0, llvm::DIType(),
+ D->isImplicit(),
+ CGM.getLangOptions().Optimize, Fn);
+
+ // Push function on region stack.
+ llvm::MDNode *SPN = SP;
+ RegionStack.push_back(SPN);
+ RegionMap[D] = llvm::WeakVH(SP);
+
+ // Clear stack used to keep track of #line directives.
+ LineDirectiveFiles.clear();
+}
+
+
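+/// EmitStopPoint - Record the current source location on the builder so it is
+/// attached to subsequently emitted instructions.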
+void CGDebugInfo::EmitStopPoint(CGBuilderTy &Builder) {
+ if (CurLoc.isInvalid() || CurLoc.isMacroID()) return;
+
+ // Don't bother if things are the same as last time.
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ if (CurLoc == PrevLoc
+ || (SM.getInstantiationLineNumber(CurLoc) ==
+ SM.getInstantiationLineNumber(PrevLoc)
+ && SM.isFromSameFile(CurLoc, PrevLoc)))
+ // New Builder may not be in sync with CGDebugInfo.
+ if (!Builder.getCurrentDebugLocation().isUnknown())
+ return;
+
+ // Update last state.
+ PrevLoc = CurLoc;
+
+ llvm::MDNode *Scope = RegionStack.back();
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(getLineNumber(CurLoc),
+ getColumnNumber(CurLoc),
+ Scope));
+}
+
+/// UpdateLineDirectiveRegion - Update the region stack only if a #line
+/// directive has introduced a scope change.
+void CGDebugInfo::UpdateLineDirectiveRegion(CGBuilderTy &Builder) {
+ if (CurLoc.isInvalid() || CurLoc.isMacroID() ||
+ PrevLoc.isInvalid() || PrevLoc.isMacroID())
+ return;
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PCLoc = SM.getPresumedLoc(CurLoc);
+ PresumedLoc PPLoc = SM.getPresumedLoc(PrevLoc);
+
+ if (!strcmp(PPLoc.getFilename(), PCLoc.getFilename()))
+ return;
+
+  // If the #line directive stack is empty then we are entering a new scope.
+ if (LineDirectiveFiles.empty()) {
+ EmitRegionStart(Builder);
+ LineDirectiveFiles.push_back(PCLoc.getFilename());
+ return;
+ }
+
+ assert (RegionStack.size() >= LineDirectiveFiles.size()
+ && "error handling #line regions!");
+
+ bool SeenThisFile = false;
+ for(std::vector<const char *>::iterator I = LineDirectiveFiles.begin(),
+ E = LineDirectiveFiles.end(); I != E; ++I)
+ if (!strcmp(PPLoc.getFilename(), *I)) {
+ SeenThisFile = true;
+ break;
+ }
+
+  // If a #line for this file was seen earlier then pop out #line regions.
+ if (SeenThisFile) {
+ while (!LineDirectiveFiles.empty()) {
+ const char *LastFile = LineDirectiveFiles.back();
+ RegionStack.pop_back();
+ LineDirectiveFiles.pop_back();
+ if (!strcmp(PPLoc.getFilename(), LastFile))
+ break;
+ }
+ return;
+ }
+
+  // ... otherwise insert a new #line region.
+ EmitRegionStart(Builder);
+ LineDirectiveFiles.push_back(PCLoc.getFilename());
+
+ return;
+}
+/// EmitRegionStart - Constructs the debug code for entering a declarative
+/// region - "llvm.dbg.region.start.".
+void CGDebugInfo::EmitRegionStart(CGBuilderTy &Builder) {
+ llvm::DIDescriptor D =
+ DebugFactory.CreateLexicalBlock(RegionStack.empty() ?
+ llvm::DIDescriptor() :
+ llvm::DIDescriptor(RegionStack.back()),
+ getOrCreateFile(CurLoc),
+ getLineNumber(CurLoc),
+ getColumnNumber(CurLoc));
+ llvm::MDNode *DN = D;
+ RegionStack.push_back(DN);
+}
+
+/// EmitRegionEnd - Constructs the debug code for exiting a declarative
+/// region - "llvm.dbg.region.end."
+void CGDebugInfo::EmitRegionEnd(CGBuilderTy &Builder) {
+ assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+
+  // Provide a region stop point.
+ EmitStopPoint(Builder);
+
+ RegionStack.pop_back();
+}
+
+/// EmitFunctionEnd - Constructs the debug code for exiting a function.
+void CGDebugInfo::EmitFunctionEnd(CGBuilderTy &Builder) {
+ assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+ unsigned RCount = FnBeginRegionCount.back();
+ assert(RCount <= RegionStack.size() && "Region stack mismatch");
+
+ // Pop all regions for this function.
+ while (RegionStack.size() != RCount)
+ EmitRegionEnd(Builder);
+ FnBeginRegionCount.pop_back();
+}
+
+// EmitTypeForVarWithBlocksAttr - Build up structure info for the byref.
+// See BuildByRefType.
+llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
+ uint64_t *XOffset) {
+
+ llvm::SmallVector<llvm::DIDescriptor, 5> EltTys;
+
+ QualType FType;
+ uint64_t FieldSize, FieldOffset;
+ unsigned FieldAlign;
+
+ llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
+ QualType Type = VD->getType();
+
+ FieldOffset = 0;
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ EltTys.push_back(CreateMemberType(Unit, FType, "__isa", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__forwarding", &FieldOffset));
+ FType = CGM.getContext().IntTy;
+ EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__size", &FieldOffset));
+
+ bool HasCopyAndDispose = CGM.BlockRequiresCopying(Type);
+ if (HasCopyAndDispose) {
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ EltTys.push_back(CreateMemberType(Unit, FType, "__copy_helper",
+ &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__destroy_helper",
+ &FieldOffset));
+ }
+
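+  // If the variable is aligned more strictly than a pointer, the byref struct
+  // contains explicit padding before the variable's field; mirror that here so
+  // the debug info offset matches the runtime layout.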
+ CharUnits Align = CGM.getContext().getDeclAlign(VD);
+ if (Align > CharUnits::fromQuantity(
+ CGM.getContext().Target.getPointerAlign(0) / 8)) {
+ unsigned AlignedOffsetInBytes
+ = llvm::RoundUpToAlignment(FieldOffset/8, Align.getQuantity());
+ unsigned NumPaddingBytes
+ = AlignedOffsetInBytes - FieldOffset/8;
+
+ if (NumPaddingBytes > 0) {
+ llvm::APInt pad(32, NumPaddingBytes);
+ FType = CGM.getContext().getConstantArrayType(CGM.getContext().CharTy,
+ pad, ArrayType::Normal, 0);
+ EltTys.push_back(CreateMemberType(Unit, FType, "", &FieldOffset));
+ }
+ }
+
+ FType = Type;
+ llvm::DIType FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = CGM.getContext().getTypeSize(FType);
+ FieldAlign = Align.getQuantity()*8;
+
+ *XOffset = FieldOffset;
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ VD->getName(), Unit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ FieldOffset += FieldSize;
+
+ llvm::DIArray Elements =
+ DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ unsigned Flags = llvm::DIType::FlagBlockByrefStruct;
+
+ return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_structure_type,
+ Unit, "", Unit,
+ 0, FieldOffset, 0, 0, Flags,
+ llvm::DIType(), Elements);
+
+}
+/// EmitDeclare - Emit local variable declaration debug info.
+void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
+ llvm::Value *Storage, CGBuilderTy &Builder) {
+ assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+
+ llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
+ llvm::DIType Ty;
+ uint64_t XOffset = 0;
+ if (VD->hasAttr<BlocksAttr>())
+ Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset);
+ else
+ Ty = getOrCreateType(VD->getType(), Unit);
+
+  // If there is no debug info for the type then do not emit debug info
+  // for this variable.
+ if (!Ty)
+ return;
+
+ // Get location information.
+ unsigned Line = getLineNumber(VD->getLocation());
+ unsigned Column = getColumnNumber(VD->getLocation());
+
+ // Create the descriptor for the variable.
+ llvm::DIVariable D =
+ DebugFactory.CreateVariable(Tag, llvm::DIDescriptor(RegionStack.back()),
+ VD->getName(),
+ Unit, Line, Ty, CGM.getLangOptions().Optimize);
+ // Insert an llvm.dbg.declare into the current block.
+ llvm::Instruction *Call =
+ DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock());
+
+ llvm::MDNode *Scope = RegionStack.back();
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
+}
+
+/// EmitDeclare - Emit local variable declaration debug info.
+void CGDebugInfo::EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag,
+ llvm::Value *Storage, CGBuilderTy &Builder,
+ CodeGenFunction *CGF) {
+ const ValueDecl *VD = BDRE->getDecl();
+ assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+
+ if (Builder.GetInsertBlock() == 0)
+ return;
+
+ uint64_t XOffset = 0;
+ llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
+ llvm::DIType Ty;
+ if (VD->hasAttr<BlocksAttr>())
+ Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset);
+ else
+ Ty = getOrCreateType(VD->getType(), Unit);
+
+ // Get location information.
+ unsigned Line = getLineNumber(VD->getLocation());
+ unsigned Column = getColumnNumber(VD->getLocation());
+
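+  // Build the complex address expression: dereference the block storage and
+  // add the capture's offset; for a __block variable also go through the
+  // __forwarding pointer and add the field offset computed above.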
+ CharUnits offset = CGF->BlockDecls[VD];
+ llvm::SmallVector<llvm::Value *, 9> addr;
+ const llvm::Type *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpPlus));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ if (BDRE->isByRef()) {
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpPlus));
+ // offset of __forwarding field
+ offset = CharUnits::fromQuantity(CGF->LLVMPointerWidth/8);
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpPlus));
+ // offset of x field
+ offset = CharUnits::fromQuantity(XOffset/8);
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ }
+
+ // Create the descriptor for the variable.
+ llvm::DIVariable D =
+ DebugFactory.CreateComplexVariable(Tag,
+ llvm::DIDescriptor(RegionStack.back()),
+ VD->getName(), Unit, Line, Ty,
+ addr);
+ // Insert an llvm.dbg.declare into the current block.
+ llvm::Instruction *Call =
+ DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock());
+
+ llvm::MDNode *Scope = RegionStack.back();
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
+}
+
+void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
+ llvm::Value *Storage,
+ CGBuilderTy &Builder) {
+ EmitDeclare(VD, llvm::dwarf::DW_TAG_auto_variable, Storage, Builder);
+}
+
+void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
+ const BlockDeclRefExpr *BDRE, llvm::Value *Storage, CGBuilderTy &Builder,
+ CodeGenFunction *CGF) {
+ EmitDeclare(BDRE, llvm::dwarf::DW_TAG_auto_variable, Storage, Builder, CGF);
+}
+
+/// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
+/// variable declaration.
+void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
+ CGBuilderTy &Builder) {
+ EmitDeclare(VD, llvm::dwarf::DW_TAG_arg_variable, AI, Builder);
+}
+
+
+
+/// EmitGlobalVariable - Emit information about a global variable.
+void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
+ const VarDecl *D) {
+
+ // Create global variable debug descriptor.
+ llvm::DIFile Unit = getOrCreateFile(D->getLocation());
+ unsigned LineNo = getLineNumber(D->getLocation());
+
+ QualType T = D->getType();
+ if (T->isIncompleteArrayType()) {
+
+ // CodeGen turns int[] into int[1] so we'll do the same here.
+ llvm::APSInt ConstVal(32);
+
+ ConstVal = 1;
+ QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
+
+ T = CGM.getContext().getConstantArrayType(ET, ConstVal,
+ ArrayType::Normal, 0);
+ }
+ llvm::StringRef DeclName = D->getName();
+ llvm::StringRef LinkageName;
+ if (D->getDeclContext() && !isa<FunctionDecl>(D->getDeclContext()))
+ LinkageName = Var->getName();
+ llvm::DIDescriptor DContext =
+ getContextDescriptor(dyn_cast<Decl>(D->getDeclContext()), Unit);
+ DebugFactory.CreateGlobalVariable(DContext, DeclName, DeclName, LinkageName,
+ Unit, LineNo, getOrCreateType(T, Unit),
+ Var->hasInternalLinkage(),
+ true/*definition*/, Var);
+}
+
+/// EmitGlobalVariable - Emit information about an Objective-C interface.
+void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
+ ObjCInterfaceDecl *ID) {
+ // Create global variable debug descriptor.
+ llvm::DIFile Unit = getOrCreateFile(ID->getLocation());
+ unsigned LineNo = getLineNumber(ID->getLocation());
+
+ llvm::StringRef Name = ID->getName();
+
+ QualType T = CGM.getContext().getObjCInterfaceType(ID);
+ if (T->isIncompleteArrayType()) {
+
+ // CodeGen turns int[] into int[1] so we'll do the same here.
+ llvm::APSInt ConstVal(32);
+
+ ConstVal = 1;
+ QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
+
+ T = CGM.getContext().getConstantArrayType(ET, ConstVal,
+ ArrayType::Normal, 0);
+ }
+
+ DebugFactory.CreateGlobalVariable(Unit, Name, Name, Name, Unit, LineNo,
+ getOrCreateType(T, Unit),
+ Var->hasInternalLinkage(),
+ true/*definition*/, Var);
+}
+
+/// EmitGlobalVariable - Emit global variable's debug info.
+void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
+ llvm::ConstantInt *Init,
+ CGBuilderTy &Builder) {
+ // Create the descriptor for the variable.
+ llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
+ llvm::StringRef Name = VD->getName();
+ llvm::DIType Ty = getOrCreateType(VD->getType(), Unit);
+ if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(VD)) {
+ if (const EnumDecl *ED = dyn_cast<EnumDecl>(ECD->getDeclContext()))
+ Ty = CreateEnumType(ED, Unit);
+ }
+ // Do not use DIGlobalVariable for enums.
+ if (Ty.getTag() == llvm::dwarf::DW_TAG_enumeration_type)
+ return;
+ DebugFactory.CreateGlobalVariable(Unit, Name, Name, Name, Unit,
+ getLineNumber(VD->getLocation()),
+ Ty, true, true, Init);
+}
+
+/// getOrCreateNameSpace - Return the namespace descriptor for the given
+/// namespace decl.
+llvm::DINameSpace
+CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl,
+ llvm::DIDescriptor Unit) {
+ llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH>::iterator I =
+ NameSpaceCache.find(NSDecl);
+ if (I != NameSpaceCache.end())
+ return llvm::DINameSpace(cast<llvm::MDNode>(I->second));
+
+ unsigned LineNo = getLineNumber(NSDecl->getLocation());
+
+ llvm::DIDescriptor Context =
+ getContextDescriptor(dyn_cast<Decl>(NSDecl->getDeclContext()), Unit);
+ llvm::DINameSpace NS =
+ DebugFactory.CreateNameSpace(Context, NSDecl->getName(),
+ llvm::DIFile(Unit), LineNo);
+ NameSpaceCache[NSDecl] = llvm::WeakVH(NS);
+ return NS;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h
new file mode 100644
index 0000000..a1ad012
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h
@@ -0,0 +1,257 @@
+//===--- CGDebugInfo.h - DebugInfo for LLVM CodeGen -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the source-level debug info generator for LLVM translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGDEBUGINFO_H
+#define CLANG_CODEGEN_CGDEBUGINFO_H
+
+#include "clang/AST/Type.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Support/ValueHandle.h"
+#include "llvm/Support/Allocator.h"
+
+#include "CGBuilder.h"
+
+namespace llvm {
+ class MDNode;
+}
+
+namespace clang {
+ class VarDecl;
+ class ObjCInterfaceDecl;
+
+namespace CodeGen {
+ class CodeGenModule;
+ class CodeGenFunction;
+ class GlobalDecl;
+
+/// CGDebugInfo - This class gathers all debug information during compilation
+/// and is responsible for emitting it to LLVM globals or passing it directly
+/// to the backend.
+class CGDebugInfo {
+ CodeGenModule &CGM;
+ llvm::DIFactory DebugFactory;
+ llvm::DICompileUnit TheCU;
+ SourceLocation CurLoc, PrevLoc;
+ llvm::DIType VTablePtrType;
+
+ /// TypeCache - Cache of previously constructed Types.
+ llvm::DenseMap<void *, llvm::WeakVH> TypeCache;
+
+ bool BlockLiteralGenericSet;
+ llvm::DIType BlockLiteralGeneric;
+
+ std::vector<llvm::TrackingVH<llvm::MDNode> > RegionStack;
+ llvm::DenseMap<const Decl *, llvm::WeakVH> RegionMap;
+ // FnBeginRegionCount - Keep track of RegionStack counter at the beginning
+ // of a function. This is used to pop unbalanced regions at the end of a
+ // function.
+ std::vector<unsigned> FnBeginRegionCount;
+
+ /// LineDirectiveFiles - This stack is used to keep track of
+ /// scopes introduced by #line directives.
+ std::vector<const char *> LineDirectiveFiles;
+
+ /// DebugInfoNames - Storage for names that are constructed on demand,
+ /// e.g. C++ destructor and operator names.
+ llvm::BumpPtrAllocator DebugInfoNames;
+ llvm::StringRef CWDName;
+
+ llvm::DenseMap<const char *, llvm::WeakVH> DIFileCache;
+ llvm::DenseMap<const FunctionDecl *, llvm::WeakVH> SPCache;
+ llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH> NameSpaceCache;
+
+ /// Helper functions for getOrCreateType.
+ llvm::DIType CreateType(const BuiltinType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const ComplexType *Ty, llvm::DIFile F);
+ llvm::DIType CreateQualifiedType(QualType Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const TypedefType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const ObjCObjectPointerType *Ty,
+ llvm::DIFile F);
+ llvm::DIType CreateType(const PointerType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const BlockPointerType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const FunctionType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const TagType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const RecordType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const ObjCInterfaceType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const ObjCObjectType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const EnumType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const VectorType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const ArrayType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const LValueReferenceType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const MemberPointerType *Ty, llvm::DIFile F);
+ llvm::DIType CreateEnumType(const EnumDecl *ED, llvm::DIFile Unit);
+ llvm::DIType getOrCreateMethodType(const CXXMethodDecl *Method,
+ llvm::DIFile F);
+ llvm::DIType getOrCreateVTablePtrType(llvm::DIFile F);
+ llvm::DINameSpace getOrCreateNameSpace(const NamespaceDecl *N,
+ llvm::DIDescriptor Unit);
+
+ llvm::DIType CreatePointerLikeType(unsigned Tag,
+ const Type *Ty, QualType PointeeTy,
+ llvm::DIFile F);
+
+ llvm::DISubprogram CreateCXXMemberFunction(const CXXMethodDecl *Method,
+ llvm::DIFile F,
+ llvm::DIType RecordTy);
+
+ void CollectCXXMemberFunctions(const CXXRecordDecl *Decl,
+ llvm::DIFile F,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &E,
+ llvm::DIType T);
+
+ void CollectCXXFriends(const CXXRecordDecl *Decl,
+ llvm::DIFile F,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+ llvm::DIType RecordTy);
+
+ void CollectCXXBases(const CXXRecordDecl *Decl,
+ llvm::DIFile F,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+ llvm::DIType RecordTy);
+
+
+ void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile F,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &E);
+
+ void CollectVTableInfo(const CXXRecordDecl *Decl,
+ llvm::DIFile F,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys);
+
+public:
+ CGDebugInfo(CodeGenModule &CGM);
+ ~CGDebugInfo();
+
+ /// setLocation - Update the current source location. If \arg loc is
+ /// invalid it is ignored.
+ void setLocation(SourceLocation Loc);
+
+ /// EmitStopPoint - Emit a call to llvm.dbg.stoppoint to indicate a change of
+ /// source line.
+ void EmitStopPoint(CGBuilderTy &Builder);
+
+ /// EmitFunctionStart - Emit a call to llvm.dbg.function.start to indicate
+ /// start of a new function.
+ void EmitFunctionStart(GlobalDecl GD, QualType FnType,
+ llvm::Function *Fn, CGBuilderTy &Builder);
+
+ /// EmitFunctionEnd - Constructs the debug code for exiting a function.
+ void EmitFunctionEnd(CGBuilderTy &Builder);
+
+ /// UpdateLineDirectiveRegion - Update region stack only if #line directive
+ /// has introduced scope change.
+ void UpdateLineDirectiveRegion(CGBuilderTy &Builder);
+
+ /// EmitRegionStart - Emit a call to llvm.dbg.region.start to indicate start
+ /// of a new block.
+ void EmitRegionStart(CGBuilderTy &Builder);
+
+ /// EmitRegionEnd - Emit call to llvm.dbg.region.end to indicate end of a
+ /// block.
+ void EmitRegionEnd(CGBuilderTy &Builder);
+
+ /// EmitDeclareOfAutoVariable - Emit call to llvm.dbg.declare for an automatic
+ /// variable declaration.
+ void EmitDeclareOfAutoVariable(const VarDecl *Decl, llvm::Value *AI,
+ CGBuilderTy &Builder);
+
+ /// EmitDeclareOfBlockDeclRefVariable - Emit call to llvm.dbg.declare for an
+ /// imported variable declaration in a block.
+ void EmitDeclareOfBlockDeclRefVariable(const BlockDeclRefExpr *BDRE,
+ llvm::Value *AI,
+ CGBuilderTy &Builder,
+ CodeGenFunction *CGF);
+
+ /// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
+ /// variable declaration.
+ void EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI,
+ CGBuilderTy &Builder);
+
+ /// EmitGlobalVariable - Emit information about a global variable.
+ void EmitGlobalVariable(llvm::GlobalVariable *GV, const VarDecl *Decl);
+
+ /// EmitGlobalVariable - Emit information about an Objective-C interface.
+ void EmitGlobalVariable(llvm::GlobalVariable *GV, ObjCInterfaceDecl *Decl);
+
+ /// EmitGlobalVariable - Emit global variable's debug info.
+ void EmitGlobalVariable(const ValueDecl *VD, llvm::ConstantInt *Init,
+ CGBuilderTy &Builder);
+
+private:
+ /// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
+ void EmitDeclare(const VarDecl *decl, unsigned Tag, llvm::Value *AI,
+ CGBuilderTy &Builder);
+
+ /// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
+ void EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag, llvm::Value *AI,
+ CGBuilderTy &Builder, CodeGenFunction *CGF);
+
+ // EmitTypeForVarWithBlocksAttr - Build up structure info for the byref.
+ // See BuildByRefType.
+ llvm::DIType EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
+ uint64_t *OffSet);
+
+ /// getContextDescriptor - Get context info for the decl.
+ llvm::DIDescriptor getContextDescriptor(const Decl *Decl,
+ llvm::DIDescriptor &CU);
+
+ /// getCurrentDirname - Return current directory name.
+ llvm::StringRef getCurrentDirname();
+
+ /// CreateCompileUnit - Create new compile unit.
+ void CreateCompileUnit();
+
+ /// getOrCreateFile - Get the file debug info descriptor for the input
+ /// location.
+ llvm::DIFile getOrCreateFile(SourceLocation Loc);
+
+ /// getOrCreateType - Get the type from the cache or create a new type if
+ /// necessary.
+ llvm::DIType getOrCreateType(QualType Ty, llvm::DIFile F);
+
+ /// CreateTypeNode - Create type metadata for a source language type.
+ llvm::DIType CreateTypeNode(QualType Ty, llvm::DIFile F);
+
+ /// CreateMemberType - Create new member and increase Offset by FType's size.
+ llvm::DIType CreateMemberType(llvm::DIFile Unit, QualType FType,
+ llvm::StringRef Name, uint64_t *Offset);
+
+ /// getFunctionName - Get function name for the given FunctionDecl. If the
+ /// name is constructed on demand (e.g. C++ destructor) then the name
+ /// is stored on the side.
+ llvm::StringRef getFunctionName(const FunctionDecl *FD);
+ /// getObjCMethodName - Returns the unmangled name of an Objective-C method.
+ /// This is the display name for the debugging info.
+ llvm::StringRef getObjCMethodName(const ObjCMethodDecl *FD);
+
+ /// getClassName - Get class name including template argument list.
+ llvm::StringRef getClassName(RecordDecl *RD);
+
+ /// getVTableName - Get vtable name for the given Class.
+ llvm::StringRef getVTableName(const CXXRecordDecl *Decl);
+
+ /// getLineNumber - Get line number for the location. If location is invalid
+ /// then use current location.
+ unsigned getLineNumber(SourceLocation Loc);
+
+ /// getColumnNumber - Get column number for the location. If location is
+ /// invalid then use current location.
+ unsigned getColumnNumber(SourceLocation Loc);
+};
+} // namespace CodeGen
+} // namespace clang
+
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
new file mode 100644
index 0000000..57e5236
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
@@ -0,0 +1,846 @@
+//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Decl nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Type.h"
+using namespace clang;
+using namespace CodeGen;
+
+
+void CodeGenFunction::EmitDecl(const Decl &D) {
+ switch (D.getKind()) {
+ case Decl::TranslationUnit:
+ case Decl::Namespace:
+ case Decl::UnresolvedUsingTypename:
+ case Decl::ClassTemplateSpecialization:
+ case Decl::ClassTemplatePartialSpecialization:
+ case Decl::TemplateTypeParm:
+ case Decl::UnresolvedUsingValue:
+ case Decl::NonTypeTemplateParm:
+ case Decl::CXXMethod:
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor:
+ case Decl::CXXConversion:
+ case Decl::Field:
+ case Decl::ObjCIvar:
+ case Decl::ObjCAtDefsField:
+ case Decl::ParmVar:
+ case Decl::ImplicitParam:
+ case Decl::ClassTemplate:
+ case Decl::FunctionTemplate:
+ case Decl::TemplateTemplateParm:
+ case Decl::ObjCMethod:
+ case Decl::ObjCCategory:
+ case Decl::ObjCProtocol:
+ case Decl::ObjCInterface:
+ case Decl::ObjCCategoryImpl:
+ case Decl::ObjCImplementation:
+ case Decl::ObjCProperty:
+ case Decl::ObjCCompatibleAlias:
+ case Decl::AccessSpec:
+ case Decl::LinkageSpec:
+ case Decl::ObjCPropertyImpl:
+ case Decl::ObjCClass:
+ case Decl::ObjCForwardProtocol:
+ case Decl::FileScopeAsm:
+ case Decl::Friend:
+ case Decl::FriendTemplate:
+ case Decl::Block:
+
+ assert(0 && "Declaration not should not be in declstmts!");
+ case Decl::Function: // void X();
+ case Decl::Record: // struct/union/class X;
+ case Decl::Enum: // enum X;
+ case Decl::EnumConstant: // enum ? { X = ? }
+ case Decl::CXXRecord: // struct/union/class X; [C++]
+ case Decl::Using: // using X; [C++]
+ case Decl::UsingShadow:
+ case Decl::UsingDirective: // using namespace X; [C++]
+ case Decl::NamespaceAlias:
+ case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
+ // None of these decls require codegen support.
+ return;
+
+ case Decl::Var: {
+ const VarDecl &VD = cast<VarDecl>(D);
+ assert(VD.isBlockVarDecl() &&
+ "Should not see file-scope variables inside a function!");
+ return EmitBlockVarDecl(VD);
+ }
+
+ case Decl::Typedef: { // typedef int X;
+ const TypedefDecl &TD = cast<TypedefDecl>(D);
+ QualType Ty = TD.getUnderlyingType();
+
+ if (Ty->isVariablyModifiedType())
+ EmitVLASize(Ty);
+ }
+ }
+}
+
+/// EmitBlockVarDecl - This method handles emission of any variable declaration
+/// inside a function, including static vars etc.
+void CodeGenFunction::EmitBlockVarDecl(const VarDecl &D) {
+ if (D.hasAttr<AsmLabelAttr>())
+ CGM.ErrorUnsupported(&D, "__asm__");
+
+ switch (D.getStorageClass()) {
+ case SC_None:
+ case SC_Auto:
+ case SC_Register:
+ return EmitLocalBlockVarDecl(D);
+ case SC_Static: {
+ llvm::GlobalValue::LinkageTypes Linkage =
+ llvm::GlobalValue::InternalLinkage;
+
+ // If the function definition has some sort of weak linkage, its
+ // static variables should also be weak so that they get properly
+ // uniqued. We can't do this in C, though, because there's no
+ // standard way to agree on which variables are the same (i.e.
+ // there's no mangling).
+ if (getContext().getLangOptions().CPlusPlus)
+ if (llvm::GlobalValue::isWeakForLinker(CurFn->getLinkage()))
+ Linkage = CurFn->getLinkage();
+
+ return EmitStaticBlockVarDecl(D, Linkage);
+ }
+ case SC_Extern:
+ case SC_PrivateExtern:
+ // Don't emit it now, allow it to be emitted lazily on its first use.
+ return;
+ }
+
+ assert(0 && "Unknown storage class");
+}
+
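+/// GetStaticDeclName - Compute the name used for a function-local static
+/// variable: the mangled name in C++, otherwise the enclosing function's name
+/// followed by the separator and the variable's name.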
+static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
+ const char *Separator) {
+ CodeGenModule &CGM = CGF.CGM;
+ if (CGF.getContext().getLangOptions().CPlusPlus) {
+ llvm::StringRef Name = CGM.getMangledName(&D);
+ return Name.str();
+ }
+
+ std::string ContextName;
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CGF.CurFuncDecl)) {
+ llvm::StringRef Name = CGM.getMangledName(FD);
+ ContextName = Name.str();
+ } else if (isa<ObjCMethodDecl>(CGF.CurFuncDecl))
+ ContextName = CGF.CurFn->getName();
+ else
+ // FIXME: What about in a block??
+ assert(0 && "Unknown context for block var decl");
+
+ return ContextName + Separator + D.getNameAsString();
+}
+
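+/// CreateStaticBlockVarDecl - Create the LLVM global that backs a static local
+/// variable. It is created with a null initializer; the real initializer, if
+/// any, is added later by AddInitializerToGlobalBlockVarDecl.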
+llvm::GlobalVariable *
+CodeGenFunction::CreateStaticBlockVarDecl(const VarDecl &D,
+ const char *Separator,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+ QualType Ty = D.getType();
+ assert(Ty->isConstantSizeType() && "VLAs can't be static");
+
+ std::string Name = GetStaticDeclName(*this, D, Separator);
+
+ const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), LTy,
+ Ty.isConstant(getContext()), Linkage,
+ CGM.EmitNullConstant(D.getType()), Name, 0,
+ D.isThreadSpecified(), Ty.getAddressSpace());
+ GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+ return GV;
+}
+
+/// AddInitializerToGlobalBlockVarDecl - Add the initializer for 'D' to the
+/// global variable that has already been created for it. If the initializer
+/// has a different type than GV does, this may free GV and return a different
+/// one. Otherwise it just returns GV.
+llvm::GlobalVariable *
+CodeGenFunction::AddInitializerToGlobalBlockVarDecl(const VarDecl &D,
+ llvm::GlobalVariable *GV) {
+ llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), D.getType(), this);
+
+ // If constant emission failed, then this should be a C++ static
+ // initializer.
+ if (!Init) {
+ if (!getContext().getLangOptions().CPlusPlus)
+ CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
+ else {
+ // Since we have a static initializer, this global variable can't
+ // be constant.
+ GV->setConstant(false);
+
+ EmitStaticCXXBlockVarDeclInit(D, GV);
+ }
+ return GV;
+ }
+
+ // The initializer may differ in type from the global. Rewrite
+ // the global to match the initializer. (We have to do this
+ // because some types, like unions, can't be completely represented
+ // in the LLVM type system.)
+ if (GV->getType()->getElementType() != Init->getType()) {
+ llvm::GlobalVariable *OldGV = GV;
+
+ GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
+ OldGV->isConstant(),
+ OldGV->getLinkage(), Init, "",
+ 0, D.isThreadSpecified(),
+ D.getType().getAddressSpace());
+
+ // Steal the name of the old global
+ GV->takeName(OldGV);
+
+ // Replace all uses of the old global with the new global
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtrForOldDecl);
+
+ // Erase the old global, since it is no longer used.
+ OldGV->eraseFromParent();
+ }
+
+ GV->setInitializer(Init);
+ return GV;
+}
+
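+/// EmitStaticBlockVarDecl - Emit a static local variable: create the backing
+/// global, record it in LocalDeclMap, emit its initializer, and apply any
+/// annotate, section, or used attributes.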
+void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+ llvm::Value *&DMEntry = LocalDeclMap[&D];
+ assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
+
+ llvm::GlobalVariable *GV = CreateStaticBlockVarDecl(D, ".", Linkage);
+
+ // Store into LocalDeclMap before generating initializer to handle
+ // circular references.
+ DMEntry = GV;
+
+ // We can't have a VLA here, but we can have a pointer to a VLA,
+ // even though that doesn't really make any sense.
+ // Make sure to evaluate VLA bounds now so that we have them for later.
+ if (D.getType()->isVariablyModifiedType())
+ EmitVLASize(D.getType());
+
+ // If this value has an initializer, emit it.
+ if (D.getInit())
+ GV = AddInitializerToGlobalBlockVarDecl(D, GV);
+
+ GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+
+ // FIXME: Merge attribute handling.
+ if (const AnnotateAttr *AA = D.getAttr<AnnotateAttr>()) {
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ llvm::Constant *Ann =
+ CGM.EmitAnnotateAttr(GV, AA,
+ SM.getInstantiationLineNumber(D.getLocation()));
+ CGM.AddAnnotation(Ann);
+ }
+
+ if (const SectionAttr *SA = D.getAttr<SectionAttr>())
+ GV->setSection(SA->getName());
+
+ if (D.hasAttr<UsedAttr>())
+ CGM.AddUsedGlobal(GV);
+
+ if (getContext().getLangOptions().CPlusPlus)
+ CGM.setStaticLocalDeclAddress(&D, GV);
+
+ // We may have to cast the constant because of the initializer
+ // mismatch above.
+ //
+ // FIXME: It is really dangerous to store this in the map; if anyone
+ // RAUW's the GV, uses of this constant will be invalid.
+ const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
+ const llvm::Type *LPtrTy =
+ llvm::PointerType::get(LTy, D.getType().getAddressSpace());
+ DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);
+
+ // Emit global variable debug descriptor for static vars.
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI) {
+ DI->setLocation(D.getLocation());
+ DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(GV), &D);
+ }
+}
+
+unsigned CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const {
+ assert(ByRefValueInfo.count(VD) && "Did not find value!");
+
+ return ByRefValueInfo.find(VD)->second.second;
+}
+
+/// BuildByRefType - This routine changes a __block variable declared as T x
+/// into:
+///
+/// struct {
+/// void *__isa;
+/// void *__forwarding;
+/// int32_t __flags;
+/// int32_t __size;
+/// void *__copy_helper; // only if needed
+/// void *__destroy_helper; // only if needed
+/// char padding[X]; // only if needed
+/// T x;
+/// } x
+///
+const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
+ std::pair<const llvm::Type *, unsigned> &Info = ByRefValueInfo[D];
+ if (Info.first)
+ return Info.first;
+
+ QualType Ty = D->getType();
+
+ std::vector<const llvm::Type *> Types;
+
+ const llvm::PointerType *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+
+ llvm::PATypeHolder ByRefTypeHolder = llvm::OpaqueType::get(VMContext);
+
+ // void *__isa;
+ Types.push_back(Int8PtrTy);
+
+ // void *__forwarding;
+ Types.push_back(llvm::PointerType::getUnqual(ByRefTypeHolder));
+
+ // int32_t __flags;
+ Types.push_back(Int32Ty);
+
+ // int32_t __size;
+ Types.push_back(Int32Ty);
+
+ bool HasCopyAndDispose = BlockRequiresCopying(Ty);
+ if (HasCopyAndDispose) {
+ /// void *__copy_helper;
+ Types.push_back(Int8PtrTy);
+
+ /// void *__destroy_helper;
+ Types.push_back(Int8PtrTy);
+ }
+
+ bool Packed = false;
+ CharUnits Align = getContext().getDeclAlign(D);
+ if (Align > CharUnits::fromQuantity(Target.getPointerAlign(0) / 8)) {
+ // We have to insert padding.
+
+ // The struct above has 2 32-bit integers.
+ unsigned CurrentOffsetInBytes = 4 * 2;
+
+ // And either 2 or 4 pointers.
+ CurrentOffsetInBytes += (HasCopyAndDispose ? 4 : 2) *
+ CGM.getTargetData().getTypeAllocSize(Int8PtrTy);
+
+ // Align the offset.
+ unsigned AlignedOffsetInBytes =
+ llvm::RoundUpToAlignment(CurrentOffsetInBytes, Align.getQuantity());
+
+ unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes;
+ if (NumPaddingBytes > 0) {
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
+ // FIXME: We need a sema error for alignment larger than the minimum of
+ // the maximal stack alignment and the alignment of malloc on the system.
+ if (NumPaddingBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumPaddingBytes);
+
+ Types.push_back(Ty);
+
+ // We want a packed struct.
+ Packed = true;
+ }
+ }
+
+ // T x;
+ Types.push_back(ConvertTypeForMem(Ty));
+
+ const llvm::Type *T = llvm::StructType::get(VMContext, Types, Packed);
+
+ cast<llvm::OpaqueType>(ByRefTypeHolder.get())->refineAbstractTypeTo(T);
+ CGM.getModule().addTypeName("struct.__block_byref_" + D->getNameAsString(),
+ ByRefTypeHolder.get());
+
+ Info.first = ByRefTypeHolder.get();
+
+ Info.second = Types.size() - 1;
+
+ return Info.first;
+}
+
+namespace {
+ struct CallArrayDtor : EHScopeStack::Cleanup {
+ CallArrayDtor(const CXXDestructorDecl *Dtor,
+ const ConstantArrayType *Type,
+ llvm::Value *Loc)
+ : Dtor(Dtor), Type(Type), Loc(Loc) {}
+
+ const CXXDestructorDecl *Dtor;
+ const ConstantArrayType *Type;
+ llvm::Value *Loc;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ QualType BaseElementTy = CGF.getContext().getBaseElementType(Type);
+ const llvm::Type *BasePtr = CGF.ConvertType(BaseElementTy);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr = CGF.Builder.CreateBitCast(Loc, BasePtr);
+ CGF.EmitCXXAggrDestructorCall(Dtor, Type, BaseAddrPtr);
+ }
+ };
+
+ struct CallVarDtor : EHScopeStack::Cleanup {
+ CallVarDtor(const CXXDestructorDecl *Dtor,
+ llvm::Value *NRVOFlag,
+ llvm::Value *Loc)
+ : Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(Loc) {}
+
+ const CXXDestructorDecl *Dtor;
+ llvm::Value *NRVOFlag;
+ llvm::Value *Loc;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ // Along the exceptions path we always execute the dtor.
+ bool NRVO = !IsForEH && NRVOFlag;
+
+ llvm::BasicBlock *SkipDtorBB = 0;
+ if (NRVO) {
+ // If we exited via NRVO, we skip the destructor call.
+ llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
+ SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
+ llvm::Value *DidNRVO = CGF.Builder.CreateLoad(NRVOFlag, "nrvo.val");
+ CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
+ CGF.EmitBlock(RunDtorBB);
+ }
+
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false, Loc);
+
+ if (NRVO) CGF.EmitBlock(SkipDtorBB);
+ }
+ };
+}
+
+namespace {
+ struct CallStackRestore : EHScopeStack::Cleanup {
+ llvm::Value *Stack;
+ CallStackRestore(llvm::Value *Stack) : Stack(Stack) {}
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ llvm::Value *V = CGF.Builder.CreateLoad(Stack, "tmp");
+ llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
+ CGF.Builder.CreateCall(F, V);
+ }
+ };
+
+ struct CallCleanupFunction : EHScopeStack::Cleanup {
+ llvm::Constant *CleanupFn;
+ const CGFunctionInfo &FnInfo;
+ llvm::Value *Addr;
+ const VarDecl &Var;
+
+ CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
+ llvm::Value *Addr, const VarDecl *Var)
+ : CleanupFn(CleanupFn), FnInfo(*Info), Addr(Addr), Var(*Var) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ // In some cases, the type of the function argument will be different from
+ // the type of the pointer. An example of this is
+ // void f(void* arg);
+ // __attribute__((cleanup(f))) void *g;
+ //
+ // To fix this we insert a bitcast here.
+ QualType ArgTy = FnInfo.arg_begin()->type;
+ llvm::Value *Arg =
+ CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));
+
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(Arg),
+ CGF.getContext().getPointerType(Var.getType())));
+ CGF.EmitCall(FnInfo, CleanupFn, ReturnValueSlot(), Args);
+ }
+ };
+
+ struct CallBlockRelease : EHScopeStack::Cleanup {
+ llvm::Value *Addr;
+ CallBlockRelease(llvm::Value *Addr) : Addr(Addr) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ llvm::Value *V = CGF.Builder.CreateStructGEP(Addr, 1, "forwarding");
+ V = CGF.Builder.CreateLoad(V);
+ CGF.BuildBlockRelease(V);
+ }
+ };
+}
+
+/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
+/// variable declaration with auto, register, or no storage class specifier.
+/// These turn into simple stack objects, or GlobalValues depending on target.
+void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
+ SpecialInitFn *SpecialInit) {
+ QualType Ty = D.getType();
+ unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
+ bool isByRef = D.hasAttr<BlocksAttr>();
+ bool needsDispose = false;
+ CharUnits Align = CharUnits::Zero();
+ bool IsSimpleConstantInitializer = false;
+
+ bool NRVO = false;
+ llvm::Value *NRVOFlag = 0;
+ llvm::Value *DeclPtr;
+ if (Ty->isConstantSizeType()) {
+ if (!Target.useGlobalsForAutomaticVariables()) {
+ NRVO = getContext().getLangOptions().ElideConstructors &&
+ D.isNRVOVariable();
+ // If this value is an array or struct, is POD, and the initializer is
+ // a statically determinable constant, try to optimize it (unless NRVO
+ // is already optimizing this).
+ if (!NRVO && D.getInit() && !isByRef &&
+ (Ty->isArrayType() || Ty->isRecordType()) &&
+ Ty->isPODType() &&
+ D.getInit()->isConstantInitializer(getContext(), false)) {
+ // If this variable is marked 'const', emit the value as a global.
+ if (CGM.getCodeGenOpts().MergeAllConstants &&
+ Ty.isConstant(getContext())) {
+ EmitStaticBlockVarDecl(D, llvm::GlobalValue::InternalLinkage);
+ return;
+ }
+
+ IsSimpleConstantInitializer = true;
+ }
+
+ // A normal fixed sized variable becomes an alloca in the entry block,
+ // unless it's an NRVO variable.
+ const llvm::Type *LTy = ConvertTypeForMem(Ty);
+
+ if (NRVO) {
+ // The named return value optimization: allocate this variable in the
+ // return slot, so that we can elide the copy when returning this
+ // variable (C++0x [class.copy]p34).
+ DeclPtr = ReturnValue;
+
+ if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
+ if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
+ // Create a flag that is used to indicate when the NRVO was applied
+ // to this variable. Set it to zero to indicate that NRVO was not
+ // applied.
+ const llvm::Type *BoolTy = llvm::Type::getInt1Ty(VMContext);
+ llvm::Value *Zero = llvm::ConstantInt::get(BoolTy, 0);
+ NRVOFlag = CreateTempAlloca(BoolTy, "nrvo");
+ Builder.CreateStore(Zero, NRVOFlag);
+
+ // Record the NRVO flag for this variable.
+ NRVOFlags[&D] = NRVOFlag;
+ }
+ }
+ } else {
+ if (isByRef)
+ LTy = BuildByRefType(&D);
+
+ llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
+ Alloc->setName(D.getNameAsString());
+
+ Align = getContext().getDeclAlign(&D);
+ if (isByRef)
+ Align = std::max(Align,
+ CharUnits::fromQuantity(Target.getPointerAlign(0) / 8));
+ Alloc->setAlignment(Align.getQuantity());
+ DeclPtr = Alloc;
+ }
+ } else {
+ // Targets that don't support recursion emit locals as globals.
+ const char *Class =
+ D.getStorageClass() == SC_Register ? ".reg." : ".auto.";
+ DeclPtr = CreateStaticBlockVarDecl(D, Class,
+ llvm::GlobalValue
+ ::InternalLinkage);
+ }
+
+ // FIXME: Can this happen?
+ if (Ty->isVariablyModifiedType())
+ EmitVLASize(Ty);
+ } else {
+ EnsureInsertPoint();
+
+ if (!DidCallStackSave) {
+ // Save the stack.
+ const llvm::Type *LTy = llvm::Type::getInt8PtrTy(VMContext);
+ llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");
+
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
+ llvm::Value *V = Builder.CreateCall(F);
+
+ Builder.CreateStore(V, Stack);
+
+ DidCallStackSave = true;
+
+ // Push a cleanup block and restore the stack there.
+ EHStack.pushCleanup<CallStackRestore>(NormalCleanup, Stack);
+ }
+
+ // Get the element type.
+ const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
+ const llvm::Type *LElemPtrTy =
+ llvm::PointerType::get(LElemTy, Ty.getAddressSpace());
+
+ llvm::Value *VLASize = EmitVLASize(Ty);
+
+ // Allocate memory for the array.
+ llvm::AllocaInst *VLA =
+ Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla");
+ VLA->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+
+ DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
+ }
+
+ llvm::Value *&DMEntry = LocalDeclMap[&D];
+ assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
+ DMEntry = DeclPtr;
+
+ // Emit debug info for local var declaration.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ assert(HaveInsertPoint() && "Unexpected unreachable point!");
+
+ DI->setLocation(D.getLocation());
+ if (Target.useGlobalsForAutomaticVariables()) {
+ DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
+ } else
+ DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
+ }
+
+ // If this local has an initializer, emit it now.
+ const Expr *Init = D.getInit();
+
+ // If we are at an unreachable point, we don't need to emit the initializer
+ // unless it contains a label.
+ if (!HaveInsertPoint()) {
+ if (!ContainsLabel(Init))
+ Init = 0;
+ else
+ EnsureInsertPoint();
+ }
+
+ if (isByRef) {
+ const llvm::PointerType *PtrToInt8Ty = llvm::Type::getInt8PtrTy(VMContext);
+
+ EnsureInsertPoint();
+ llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
+ llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
+ llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
+ llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3);
+ llvm::Value *V;
+ int flag = 0;
+ int flags = 0;
+
+ needsDispose = true;
+
+ if (Ty->isBlockPointerType()) {
+ flag |= BLOCK_FIELD_IS_BLOCK;
+ flags |= BLOCK_HAS_COPY_DISPOSE;
+ } else if (BlockRequiresCopying(Ty)) {
+ flag |= BLOCK_FIELD_IS_OBJECT;
+ flags |= BLOCK_HAS_COPY_DISPOSE;
+ }
+
+ // FIXME: Someone double check this.
+ if (Ty.isObjCGCWeak())
+ flag |= BLOCK_FIELD_IS_WEAK;
+
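+ // The isa field is 1 for __weak __block variables and 0 otherwise.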
+ int isa = 0;
+ if (flag&BLOCK_FIELD_IS_WEAK)
+ isa = 1;
+ V = llvm::ConstantInt::get(Int32Ty, isa);
+ V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
+ Builder.CreateStore(V, isa_field);
+
+ Builder.CreateStore(DeclPtr, forwarding_field);
+
+ V = llvm::ConstantInt::get(Int32Ty, flags);
+ Builder.CreateStore(V, flags_field);
+
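+ // __size holds the store size in bytes of the entire byref structure.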
+ const llvm::Type *V1;
+ V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
+ V = llvm::ConstantInt::get(Int32Ty,
+ CGM.GetTargetTypeStoreSize(V1).getQuantity());
+ Builder.CreateStore(V, size_field);
+
+ if (flags & BLOCK_HAS_COPY_DISPOSE) {
+ BlockHasCopyDispose = true;
+ llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
+ Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag,
+ Align.getQuantity()),
+ copy_helper);
+
+ llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
+ Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag,
+ Align.getQuantity()),
+ destroy_helper);
+ }
+ }
+
+ if (SpecialInit) {
+ SpecialInit(*this, D, DeclPtr);
+ } else if (Init) {
+ llvm::Value *Loc = DeclPtr;
+ if (isByRef)
+ Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
+ D.getNameAsString());
+
+ bool isVolatile = getContext().getCanonicalType(Ty).isVolatileQualified();
+
+ // If the initializer was a simple constant initializer, we can optimize it
+ // in various ways.
+ if (IsSimpleConstantInitializer) {
+ llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), Ty,this);
+ assert(Init != 0 && "Wasn't a simple constant init?");
+
+ llvm::Value *AlignVal =
+ llvm::ConstantInt::get(Int32Ty, Align.getQuantity());
+ const llvm::Type *IntPtr =
+ llvm::IntegerType::get(VMContext, LLVMPointerWidth);
+ llvm::Value *SizeVal =
+ llvm::ConstantInt::get(IntPtr,
+ getContext().getTypeSizeInChars(Ty).getQuantity());
+
+ const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
+ if (Loc->getType() != BP)
+ Loc = Builder.CreateBitCast(Loc, BP, "tmp");
+
+ llvm::Value *NotVolatile =
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0);
+
+ // If the initializer is all zeros, codegen with memset.
+ if (isa<llvm::ConstantAggregateZero>(Init)) {
+ llvm::Value *Zero =
+ llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0);
+ Builder.CreateCall5(CGM.getMemSetFn(Loc->getType(), SizeVal->getType()),
+ Loc, Zero, SizeVal, AlignVal, NotVolatile);
+ } else {
+ // Otherwise, create a temporary global with the initializer then
+ // memcpy from the global to the alloca.
+ std::string Name = GetStaticDeclName(*this, D, ".");
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(), true,
+ llvm::GlobalValue::InternalLinkage,
+ Init, Name, 0, false, 0);
+ GV->setAlignment(Align.getQuantity());
+
+ llvm::Value *SrcPtr = GV;
+ if (SrcPtr->getType() != BP)
+ SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
+
+ Builder.CreateCall5(CGM.getMemCpyFn(Loc->getType(), SrcPtr->getType(),
+ SizeVal->getType()),
+ Loc, SrcPtr, SizeVal, AlignVal, NotVolatile);
+ }
+ } else if (Ty->isReferenceType()) {
+ RValue RV = EmitReferenceBindingToExpr(Init, &D);
+ EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Alignment, Ty);
+ } else if (!hasAggregateLLVMType(Init->getType())) {
+ llvm::Value *V = EmitScalarExpr(Init);
+ EmitStoreOfScalar(V, Loc, isVolatile, Alignment, Ty);
+ } else if (Init->getType()->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(Init, Loc, isVolatile);
+ } else {
+ EmitAggExpr(Init, Loc, isVolatile);
+ }
+ }
+
+ // Handle CXX destruction of variables.
+ QualType DtorTy(Ty);
+ while (const ArrayType *Array = getContext().getAsArrayType(DtorTy))
+ DtorTy = getContext().getBaseElementType(Array);
+ if (const RecordType *RT = DtorTy->getAs<RecordType>())
+ if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (!ClassDecl->hasTrivialDestructor()) {
+ // Note: We suppress the destructor call when the corresponding NRVO
+ // flag has been set.
+ llvm::Value *Loc = DeclPtr;
+ if (isByRef)
+ Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
+ D.getNameAsString());
+
+ const CXXDestructorDecl *D = ClassDecl->getDestructor();
+ assert(D && "EmitLocalBlockVarDecl - destructor is nul");
+
+ if (const ConstantArrayType *Array =
+ getContext().getAsConstantArrayType(Ty)) {
+ EHStack.pushCleanup<CallArrayDtor>(NormalAndEHCleanup,
+ D, Array, Loc);
+ } else {
+ EHStack.pushCleanup<CallVarDtor>(NormalAndEHCleanup,
+ D, NRVOFlag, Loc);
+ }
+ }
+ }
+
+ // Handle the cleanup attribute
+ if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
+ const FunctionDecl *FD = CA->getFunctionDecl();
+
+ llvm::Constant* F = CGM.GetAddrOfFunction(FD);
+ assert(F && "Could not find function!");
+
+ const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);
+ EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup,
+ F, &Info, DeclPtr, &D);
+ }
+
+ // If this is a block variable, clean it up.
+ // FIXME: this should be an EH cleanup as well. rdar://problem/8224178
+ if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly)
+ EHStack.pushCleanup<CallBlockRelease>(NormalCleanup, DeclPtr);
+}
+
+/// Emit an alloca (or GlobalValue depending on target)
+/// for the specified parameter and set up LocalDeclMap.
+void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
+ // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
+ assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
+ "Invalid argument to EmitParmDecl");
+ QualType Ty = D.getType();
+ CanQualType CTy = getContext().getCanonicalType(Ty);
+
+ llvm::Value *DeclPtr;
+ // If this is an aggregate or variable sized value, reuse the input pointer.
+ if (!Ty->isConstantSizeType() ||
+ CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ DeclPtr = Arg;
+ } else {
+ // Otherwise, create a temporary to hold the value.
+ DeclPtr = CreateMemTemp(Ty, D.getName() + ".addr");
+
+ // Store the initial value into the alloca.
+ unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
+ EmitStoreOfScalar(Arg, DeclPtr, CTy.isVolatileQualified(), Alignment, Ty);
+ }
+ Arg->setName(D.getName());
+
+ llvm::Value *&DMEntry = LocalDeclMap[&D];
+ assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
+ DMEntry = DeclPtr;
+
+ // Emit debug info for param declaration.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(D.getLocation());
+ DI->EmitDeclareOfArgVariable(&D, DeclPtr, Builder);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
new file mode 100644
index 0000000..e2f1975
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -0,0 +1,455 @@
+//===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with code generation of C++ declarations
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/Intrinsics.h"
+
+using namespace clang;
+using namespace CodeGen;
+
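+/// EmitDeclInit - Emit the initializer for a global of non-reference type,
+/// dispatching on whether the type is scalar, complex, or aggregate.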
+static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *DeclPtr) {
+ assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
+ assert(!D.getType()->isReferenceType() &&
+ "Should not call EmitDeclInit on a reference!");
+
+ ASTContext &Context = CGF.getContext();
+
+ const Expr *Init = D.getInit();
+ QualType T = D.getType();
+ bool isVolatile = Context.getCanonicalType(T).isVolatileQualified();
+
+ unsigned Alignment = Context.getDeclAlign(&D).getQuantity();
+ if (!CGF.hasAggregateLLVMType(T)) {
+ llvm::Value *V = CGF.EmitScalarExpr(Init);
+ CGF.EmitStoreOfScalar(V, DeclPtr, isVolatile, Alignment, T);
+ } else if (T->isAnyComplexType()) {
+ CGF.EmitComplexExprIntoAddr(Init, DeclPtr, isVolatile);
+ } else {
+ CGF.EmitAggExpr(Init, DeclPtr, isVolatile);
+ }
+}
+
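+/// EmitDeclDestroy - If the global is of (an array of) class type with a
+/// non-trivial destructor, register a destructor call to run at program exit.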
+static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *DeclPtr) {
+ CodeGenModule &CGM = CGF.CGM;
+ ASTContext &Context = CGF.getContext();
+
+ QualType T = D.getType();
+
+ // Drill down past array types.
+ const ConstantArrayType *Array = Context.getAsConstantArrayType(T);
+ if (Array)
+ T = Context.getBaseElementType(Array);
+
+ // If that's not a record, we're done.
+ // FIXME: __attribute__((cleanup)) ?
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return;
+
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->hasTrivialDestructor())
+ return;
+
+ CXXDestructorDecl *Dtor = RD->getDestructor();
+
+ llvm::Constant *DtorFn;
+ if (Array) {
+ DtorFn =
+ CodeGenFunction(CGM).GenerateCXXAggrDestructorHelper(Dtor, Array,
+ DeclPtr);
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ DeclPtr = llvm::Constant::getNullValue(Int8PtrTy);
+ } else
+ DtorFn = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete);
+
+ CGF.EmitCXXGlobalDtorRegistration(DtorFn, DeclPtr);
+}
+
+void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
+ llvm::Constant *DeclPtr) {
+
+ const Expr *Init = D.getInit();
+ QualType T = D.getType();
+
+ if (!T->isReferenceType()) {
+ EmitDeclInit(*this, D, DeclPtr);
+ EmitDeclDestroy(*this, D, DeclPtr);
+ return;
+ }
+
+ unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
+ RValue RV = EmitReferenceBindingToExpr(Init, &D);
+ EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, Alignment, T);
+}
+
+void
+CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
+ llvm::Constant *DeclPtr) {
+ // Generate a global destructor entry if not using __cxa_atexit.
+ if (!CGM.getCodeGenOpts().CXAAtExit) {
+ CGM.AddCXXDtorEntry(DtorFn, DeclPtr);
+ return;
+ }
+
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8Ty(VMContext)->getPointerTo();
+
+ std::vector<const llvm::Type *> Params;
+ Params.push_back(Int8PtrTy);
+
+ // Get the destructor function type
+ const llvm::Type *DtorFnTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false);
+ DtorFnTy = llvm::PointerType::getUnqual(DtorFnTy);
+
+ Params.clear();
+ Params.push_back(DtorFnTy);
+ Params.push_back(Int8PtrTy);
+ Params.push_back(Int8PtrTy);
+
+ // Get the __cxa_atexit function type
+ // extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
+ const llvm::FunctionType *AtExitFnTy =
+ llvm::FunctionType::get(ConvertType(getContext().IntTy), Params, false);
+
+ llvm::Constant *AtExitFn = CGM.CreateRuntimeFunction(AtExitFnTy,
+ "__cxa_atexit");
+
+ llvm::Constant *Handle = CGM.CreateRuntimeVariable(Int8PtrTy,
+ "__dso_handle");
+ llvm::Value *Args[3] = { llvm::ConstantExpr::getBitCast(DtorFn, DtorFnTy),
+ llvm::ConstantExpr::getBitCast(DeclPtr, Int8PtrTy),
+ llvm::ConstantExpr::getBitCast(Handle, Int8PtrTy) };
+ Builder.CreateCall(AtExitFn, &Args[0], llvm::array_endof(Args));
+}
+
+static llvm::Function *
+CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
+ const llvm::FunctionType *FTy,
+ llvm::StringRef Name) {
+ llvm::Function *Fn =
+ llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
+ Name, &CGM.getModule());
+
+ // Set the section if needed.
+ if (const char *Section =
+ CGM.getContext().Target.getStaticInitSectionSpecifier())
+ Fn->setSection(Section);
+
+ if (!CGM.getLangOptions().Exceptions)
+ Fn->setDoesNotThrow();
+
+ return Fn;
+}
+
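+/// EmitCXXGlobalVarDeclInitFunc - Create the __cxx_global_var_init function
+/// for a single global variable and record it for inclusion in the module's
+/// global initialization function, honoring any init_priority attribute.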
+void
+CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D) {
+ const llvm::FunctionType *FTy
+ = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ false);
+
+ // Create a variable initialization function.
+ llvm::Function *Fn =
+ CreateGlobalInitOrDestructFunction(*this, FTy, "__cxx_global_var_init");
+
+ CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D);
+
+ if (D->hasAttr<InitPriorityAttr>()) {
+ unsigned int order = D->getAttr<InitPriorityAttr>()->getPriority();
+ OrderGlobalInits Key(order, PrioritizedCXXGlobalInits.size());
+ PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
+ DelayedCXXInitPosition.erase(D);
+ }
+ else {
+ llvm::DenseMap<const Decl *, unsigned>::iterator I =
+ DelayedCXXInitPosition.find(D);
+ if (I == DelayedCXXInitPosition.end()) {
+ CXXGlobalInits.push_back(Fn);
+ } else {
+ assert(CXXGlobalInits[I->second] == 0);
+ CXXGlobalInits[I->second] = Fn;
+ DelayedCXXInitPosition.erase(I);
+ }
+ }
+}
+
+void
+CodeGenModule::EmitCXXGlobalInitFunc() {
+ while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
+ CXXGlobalInits.pop_back();
+
+ if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
+ return;
+
+ const llvm::FunctionType *FTy
+ = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ false);
+
+ // Create our global initialization function.
+ llvm::Function *Fn =
+ CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__I_a");
+
+ if (!PrioritizedCXXGlobalInits.empty()) {
+ llvm::SmallVector<llvm::Constant*, 8> LocalCXXGlobalInits;
+ llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
+ PrioritizedCXXGlobalInits.end());
+ for (unsigned i = 0; i < PrioritizedCXXGlobalInits.size(); i++) {
+ llvm::Function *Fn = PrioritizedCXXGlobalInits[i].second;
+ LocalCXXGlobalInits.push_back(Fn);
+ }
+ LocalCXXGlobalInits.append(CXXGlobalInits.begin(), CXXGlobalInits.end());
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
+ &LocalCXXGlobalInits[0],
+ LocalCXXGlobalInits.size());
+ }
+ else
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
+ &CXXGlobalInits[0],
+ CXXGlobalInits.size());
+ AddGlobalCtor(Fn);
+}
+
+void CodeGenModule::EmitCXXGlobalDtorFunc() {
+ if (CXXGlobalDtors.empty())
+ return;
+
+ const llvm::FunctionType *FTy
+ = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ false);
+
+ // Create our global destructor function.
+ llvm::Function *Fn =
+ CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__D_a");
+
+ CodeGenFunction(*this).GenerateCXXGlobalDtorFunc(Fn, CXXGlobalDtors);
+ AddGlobalDtor(Fn);
+}
+
+void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
+ const VarDecl *D) {
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FunctionArgList(),
+ SourceLocation());
+
+ llvm::Constant *DeclPtr = CGM.GetAddrOfGlobalVar(D);
+ EmitCXXGlobalVarDeclInit(*D, DeclPtr);
+
+ FinishFunction();
+}
+
+void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
+ llvm::Constant **Decls,
+ unsigned NumDecls) {
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FunctionArgList(),
+ SourceLocation());
+
+ for (unsigned i = 0; i != NumDecls; ++i)
+ if (Decls[i])
+ Builder.CreateCall(Decls[i]);
+
+ FinishFunction();
+}
+
+void CodeGenFunction::GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
+ const std::vector<std::pair<llvm::WeakVH, llvm::Constant*> >
+ &DtorsAndObjects) {
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FunctionArgList(),
+ SourceLocation());
+
+ // Emit the dtors, in reverse order from construction.
+ for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) {
+ llvm::Value *Callee = DtorsAndObjects[e - i - 1].first;
+ llvm::CallInst *CI = Builder.CreateCall(Callee,
+ DtorsAndObjects[e - i - 1].second);
+ // Make sure the call and the callee agree on calling convention.
+ if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
+ CI->setCallingConv(F->getCallingConv());
+ }
+
+ FinishFunction();
+}
+
+static llvm::Constant *getGuardAcquireFn(CodeGenFunction &CGF) {
+ // int __cxa_guard_acquire(__int64_t *guard_object);
+
+ const llvm::Type *Int64PtrTy =
+ llvm::Type::getInt64PtrTy(CGF.getLLVMContext());
+
+ std::vector<const llvm::Type*> Args(1, Int64PtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.ConvertType(CGF.getContext().IntTy),
+ Args, /*isVarArg=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire");
+}
+
+static llvm::Constant *getGuardReleaseFn(CodeGenFunction &CGF) {
+ // void __cxa_guard_release(__int64_t *guard_object);
+
+ const llvm::Type *Int64PtrTy =
+ llvm::Type::getInt64PtrTy(CGF.getLLVMContext());
+
+ std::vector<const llvm::Type*> Args(1, Int64PtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
+ Args, /*isVarArg=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release");
+}
+
+static llvm::Constant *getGuardAbortFn(CodeGenFunction &CGF) {
+ // void __cxa_guard_abort(__int64_t *guard_object);
+
+ const llvm::Type *Int64PtrTy =
+ llvm::Type::getInt64PtrTy(CGF.getLLVMContext());
+
+ std::vector<const llvm::Type*> Args(1, Int64PtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
+ Args, /*isVarArg=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort");
+}
+
+namespace {
+ struct CallGuardAbort : EHScopeStack::Cleanup {
+ llvm::GlobalVariable *Guard;
+ CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ // It shouldn't be possible for this to throw, but if it can,
+ // this should allow for the possibility of an invoke.
+ CGF.Builder.CreateCall(getGuardAbortFn(CGF), Guard)
+ ->setDoesNotThrow();
+ }
+ };
+}
+
+void
+CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
+ llvm::GlobalVariable *GV) {
+ // Bail out early if this initializer isn't reachable.
+ if (!Builder.GetInsertBlock()) return;
+
+ bool ThreadsafeStatics = getContext().getLangOptions().ThreadsafeStatics;
+
+ llvm::SmallString<256> GuardVName;
+ CGM.getCXXABI().getMangleContext().mangleGuardVariable(&D, GuardVName);
+
+ // Create the guard variable.
+ llvm::GlobalVariable *GuardVariable =
+ new llvm::GlobalVariable(CGM.getModule(), Int64Ty,
+ false, GV->getLinkage(),
+ llvm::Constant::getNullValue(Int64Ty),
+ GuardVName.str());
+
+ // Load the first byte of the guard variable.
+ const llvm::Type *PtrTy
+ = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0);
+ llvm::Value *V =
+ Builder.CreateLoad(Builder.CreateBitCast(GuardVariable, PtrTy), "tmp");
+
+ llvm::BasicBlock *InitCheckBlock = createBasicBlock("init.check");
+ llvm::BasicBlock *EndBlock = createBasicBlock("init.end");
+
+ // Check if the first byte of the guard variable is zero.
+ Builder.CreateCondBr(Builder.CreateIsNull(V, "tobool"),
+ InitCheckBlock, EndBlock);
+
+ EmitBlock(InitCheckBlock);
+
+ // Variables used when coping with thread-safe statics and exceptions.
+ if (ThreadsafeStatics) {
+ // Call __cxa_guard_acquire.
+ V = Builder.CreateCall(getGuardAcquireFn(*this), GuardVariable);
+
+ llvm::BasicBlock *InitBlock = createBasicBlock("init");
+
+ Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
+ InitBlock, EndBlock);
+
+ // Call __cxa_guard_abort along the exceptional edge.
+ if (Exceptions)
+ EHStack.pushCleanup<CallGuardAbort>(EHCleanup, GuardVariable);
+
+ EmitBlock(InitBlock);
+ }
+
+ if (D.getType()->isReferenceType()) {
+ unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
+ QualType T = D.getType();
+ RValue RV = EmitReferenceBindingToExpr(D.getInit(), &D);
+ EmitStoreOfScalar(RV.getScalarVal(), GV, /*Volatile=*/false, Alignment, T);
+ } else
+ EmitDeclInit(*this, D, GV);
+
+ if (ThreadsafeStatics) {
+ // Pop the guard-abort cleanup if we pushed one.
+ if (Exceptions)
+ PopCleanupBlock();
+
+ // Call __cxa_guard_release. This cannot throw.
+ Builder.CreateCall(getGuardReleaseFn(*this), GuardVariable);
+ } else {
+ llvm::Value *One =
+ llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 1);
+ Builder.CreateStore(One, Builder.CreateBitCast(GuardVariable, PtrTy));
+ }
+
+ // Register the call to the destructor.
+ if (!D.getType()->isReferenceType())
+ EmitDeclDestroy(*this, D, GV);
+
+ EmitBlock(EndBlock);
+}
+
+/// GenerateCXXAggrDestructorHelper - Generates a helper function which, when
+/// invoked, calls the default destructor on array elements in reverse order of
+/// construction.
+llvm::Function *
+CodeGenFunction::GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
+ const ArrayType *Array,
+ llvm::Value *This) {
+ FunctionArgList Args;
+ ImplicitParamDecl *Dst =
+ ImplicitParamDecl::Create(getContext(), 0,
+ SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+ Args.push_back(std::make_pair(Dst, Dst->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(getContext().VoidTy, Args,
+ FunctionType::ExtInfo());
+ const llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false);
+ llvm::Function *Fn =
+ CreateGlobalInitOrDestructFunction(CGM, FTy, "__cxx_global_array_dtor");
+
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn, Args, SourceLocation());
+
+ QualType BaseElementTy = getContext().getBaseElementType(Array);
+ const llvm::Type *BasePtr = ConvertType(BaseElementTy)->getPointerTo();
+ llvm::Value *BaseAddrPtr = Builder.CreateBitCast(This, BasePtr);
+
+ EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
+
+ FinishFunction();
+
+ return Fn;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
new file mode 100644
index 0000000..7fb616e5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
@@ -0,0 +1,1574 @@
+//===--- CGException.cpp - Emit LLVM Code for C++ exceptions --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ exception related code generation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtCXX.h"
+
+#include "llvm/Intrinsics.h"
+#include "llvm/Support/CallSite.h"
+
+#include "CGObjCRuntime.h"
+#include "CodeGenFunction.h"
+#include "CGException.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+/// Push an entry of the given size onto this protected-scope stack.
+char *EHScopeStack::allocate(size_t Size) {
+ if (!StartOfBuffer) {
+ unsigned Capacity = 1024;
+ while (Capacity < Size) Capacity *= 2;
+ StartOfBuffer = new char[Capacity];
+ StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
+ } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
+ unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer;
+ unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer);
+
+ unsigned NewCapacity = CurrentCapacity;
+ do {
+ NewCapacity *= 2;
+ } while (NewCapacity < UsedCapacity + Size);
+
+ char *NewStartOfBuffer = new char[NewCapacity];
+ char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity;
+ char *NewStartOfData = NewEndOfBuffer - UsedCapacity;
+ memcpy(NewStartOfData, StartOfData, UsedCapacity);
+ delete [] StartOfBuffer;
+ StartOfBuffer = NewStartOfBuffer;
+ EndOfBuffer = NewEndOfBuffer;
+ StartOfData = NewStartOfData;
+ }
+
+ assert(StartOfBuffer + Size <= StartOfData);
+ StartOfData -= Size;
+ return StartOfData;
+}
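+
+// A sketch of the resulting layout (purely illustrative): the scope data
+// occupies the high end of the buffer and grows downward, so the most
+// recently pushed scope always starts at StartOfData and iteration from
+// begin() visits scopes from innermost to outermost:
+//
+//   StartOfBuffer ... StartOfData [innermost] ... [outermost] EndOfBuffer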
+
+EHScopeStack::stable_iterator
+EHScopeStack::getEnclosingEHCleanup(iterator it) const {
+ assert(it != end());
+ do {
+ if (isa<EHCleanupScope>(*it)) {
+ if (cast<EHCleanupScope>(*it).isEHCleanup())
+ return stabilize(it);
+ return cast<EHCleanupScope>(*it).getEnclosingEHCleanup();
+ }
+ ++it;
+ } while (it != end());
+ return stable_end();
+}
+
+
+void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
+ assert(((Size % sizeof(void*)) == 0) && "cleanup type is misaligned");
+ char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
+ bool IsNormalCleanup = Kind & NormalCleanup;
+ bool IsEHCleanup = Kind & EHCleanup;
+ bool IsActive = !(Kind & InactiveCleanup);
+ EHCleanupScope *Scope =
+ new (Buffer) EHCleanupScope(IsNormalCleanup,
+ IsEHCleanup,
+ IsActive,
+ Size,
+ BranchFixups.size(),
+ InnermostNormalCleanup,
+ InnermostEHCleanup);
+ if (IsNormalCleanup)
+ InnermostNormalCleanup = stable_begin();
+ if (IsEHCleanup)
+ InnermostEHCleanup = stable_begin();
+
+ return Scope->getCleanupBuffer();
+}
+
+void EHScopeStack::popCleanup() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ assert(isa<EHCleanupScope>(*begin()));
+ EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
+ InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
+ InnermostEHCleanup = Cleanup.getEnclosingEHCleanup();
+ StartOfData += Cleanup.getAllocatedSize();
+
+ if (empty()) NextEHDestIndex = FirstEHDestIndex;
+
+ // Destroy the cleanup.
+ Cleanup.~EHCleanupScope();
+
+ // Check whether we can shrink the branch-fixups stack.
+ if (!BranchFixups.empty()) {
+ // If we no longer have any normal cleanups, all the fixups are
+ // complete.
+ if (!hasNormalCleanups())
+ BranchFixups.clear();
+
+ // Otherwise we can still trim out unnecessary nulls.
+ else
+ popNullFixups();
+ }
+}
+
+EHFilterScope *EHScopeStack::pushFilter(unsigned NumFilters) {
+ char *Buffer = allocate(EHFilterScope::getSizeForNumFilters(NumFilters));
+ CatchDepth++;
+ return new (Buffer) EHFilterScope(NumFilters);
+}
+
+void EHScopeStack::popFilter() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ EHFilterScope &Filter = cast<EHFilterScope>(*begin());
+ StartOfData += EHFilterScope::getSizeForNumFilters(Filter.getNumFilters());
+
+ if (empty()) NextEHDestIndex = FirstEHDestIndex;
+
+ assert(CatchDepth > 0 && "mismatched filter push/pop");
+ CatchDepth--;
+}
+
+EHCatchScope *EHScopeStack::pushCatch(unsigned NumHandlers) {
+ char *Buffer = allocate(EHCatchScope::getSizeForNumHandlers(NumHandlers));
+ CatchDepth++;
+ EHCatchScope *Scope = new (Buffer) EHCatchScope(NumHandlers);
+ for (unsigned I = 0; I != NumHandlers; ++I)
+ Scope->getHandlers()[I].Index = getNextEHDestIndex();
+ return Scope;
+}
+
+void EHScopeStack::pushTerminate() {
+ char *Buffer = allocate(EHTerminateScope::getSize());
+ CatchDepth++;
+ new (Buffer) EHTerminateScope(getNextEHDestIndex());
+}
+
+/// Remove any 'null' fixups on the stack. However, we can't pop more
+/// fixups than the fixup depth on the innermost normal cleanup, or
+/// else fixups that we try to add to that cleanup will end up in the
+/// wrong place. We *could* try to shrink fixup depths, but that's
+/// actually a lot of work for little benefit.
+void EHScopeStack::popNullFixups() {
+ // We expect this to only be called when there's still an innermost
+ // normal cleanup; otherwise there really shouldn't be any fixups.
+ assert(hasNormalCleanups());
+
+ EHScopeStack::iterator it = find(InnermostNormalCleanup);
+ unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
+ assert(BranchFixups.size() >= MinSize && "fixup stack out of order");
+
+ while (BranchFixups.size() > MinSize &&
+ BranchFixups.back().Destination == 0)
+ BranchFixups.pop_back();
+}
+
+static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) {
+ // void *__cxa_allocate_exception(size_t thrown_size);
+ const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
+ std::vector<const llvm::Type*> Args(1, SizeTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getInt8PtrTy(CGF.getLLVMContext()),
+ Args, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
+}
+
+static llvm::Constant *getFreeExceptionFn(CodeGenFunction &CGF) {
+ // void __cxa_free_exception(void *thrown_exception);
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
+ Args, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_free_exception");
+}
+
+static llvm::Constant *getThrowFn(CodeGenFunction &CGF) {
+ // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
+ // void (*dest) (void *));
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ std::vector<const llvm::Type*> Args(3, Int8PtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
+ Args, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
+}
+
+static llvm::Constant *getReThrowFn(CodeGenFunction &CGF) {
+ // void __cxa_rethrow();
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
+}
+
+static llvm::Constant *getGetExceptionPtrFn(CodeGenFunction &CGF) {
+ // void *__cxa_get_exception_ptr(void*);
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(Int8PtrTy, Args, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
+}
+
+static llvm::Constant *getBeginCatchFn(CodeGenFunction &CGF) {
+ // void *__cxa_begin_catch(void*);
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(Int8PtrTy, Args, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
+}
+
+static llvm::Constant *getEndCatchFn(CodeGenFunction &CGF) {
+ // void __cxa_end_catch();
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
+}
+
+static llvm::Constant *getUnexpectedFn(CodeGenFunction &CGF) {
+ // void __cxa_call_unexpected(void *thrown_exception);
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
+ Args, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_call_unexpected");
+}
+
+llvm::Constant *CodeGenFunction::getUnwindResumeOrRethrowFn() {
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
+ std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), Args,
+ false);
+
+ if (CGM.getLangOptions().SjLjExceptions)
+ return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume_or_Rethrow");
+ return CGM.CreateRuntimeFunction(FTy, "_Unwind_Resume_or_Rethrow");
+}
+
+static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
+ // void __terminate();
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy,
+ CGF.CGM.getLangOptions().CPlusPlus ? "_ZSt9terminatev" : "abort");
+}
+
+static llvm::Constant *getCatchallRethrowFn(CodeGenFunction &CGF,
+ const char *Name) {
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+
+ const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
+ const llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, Args, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, Name);
+}
+
+const EHPersonality EHPersonality::GNU_C("__gcc_personality_v0");
+const EHPersonality EHPersonality::NeXT_ObjC("__objc_personality_v0");
+const EHPersonality EHPersonality::GNU_CPlusPlus("__gxx_personality_v0");
+const EHPersonality EHPersonality::GNU_CPlusPlus_SJLJ("__gxx_personality_sj0");
+const EHPersonality EHPersonality::GNU_ObjC("__gnu_objc_personality_v0",
+ "objc_exception_throw");
+
+static const EHPersonality &getCPersonality(const LangOptions &L) {
+ return EHPersonality::GNU_C;
+}
+
+static const EHPersonality &getObjCPersonality(const LangOptions &L) {
+ if (L.NeXTRuntime) {
+ if (L.ObjCNonFragileABI) return EHPersonality::NeXT_ObjC;
+ else return getCPersonality(L);
+ } else {
+ return EHPersonality::GNU_ObjC;
+ }
+}
+
+static const EHPersonality &getCXXPersonality(const LangOptions &L) {
+ if (L.SjLjExceptions)
+ return EHPersonality::GNU_CPlusPlus_SJLJ;
+ else
+ return EHPersonality::GNU_CPlusPlus;
+}
+
+/// Determines the personality function to use when both C++
+/// and Objective-C exceptions are being caught.
+static const EHPersonality &getObjCXXPersonality(const LangOptions &L) {
+ // The ObjC personality defers to the C++ personality for non-ObjC
+ // handlers. Unlike the C++ case, we use the same personality
+ // function on targets using (backend-driven) SJLJ EH.
+ if (L.NeXTRuntime) {
+ if (L.ObjCNonFragileABI)
+ return EHPersonality::NeXT_ObjC;
+
+ // In the fragile ABI, just use C++ exception handling and hope
+ // they're not doing crazy exception mixing.
+ else
+ return getCXXPersonality(L);
+ }
+
+ // The GNU runtime's personality function inherently doesn't support
+ // mixed EH. Use the C++ personality just to avoid returning null.
+ return getCXXPersonality(L);
+}
+
+const EHPersonality &EHPersonality::get(const LangOptions &L) {
+ if (L.CPlusPlus && L.ObjC1)
+ return getObjCXXPersonality(L);
+ else if (L.CPlusPlus)
+ return getCXXPersonality(L);
+ else if (L.ObjC1)
+ return getObjCPersonality(L);
+ else
+ return getCPersonality(L);
+}
+
+static llvm::Constant *getPersonalityFn(CodeGenFunction &CGF,
+ const EHPersonality &Personality) {
+ const char *Name = Personality.getPersonalityFnName();
+
+ llvm::Constant *Fn =
+ CGF.CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::Type::getInt32Ty(
+ CGF.CGM.getLLVMContext()),
+ true),
+ Name);
+ return llvm::ConstantExpr::getBitCast(Fn, CGF.CGM.PtrToInt8Ty);
+}
+
+/// Returns the value to inject into a selector to indicate the
+/// presence of a catch-all.
+static llvm::Constant *getCatchAllValue(CodeGenFunction &CGF) {
+ // Possibly we should use @llvm.eh.catch.all.value here.
+ return llvm::ConstantPointerNull::get(CGF.CGM.PtrToInt8Ty);
+}
+
+/// Returns the value to inject into a selector to indicate the
+/// presence of a cleanup.
+static llvm::Constant *getCleanupValue(CodeGenFunction &CGF) {
+ return llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0);
+}
+
+namespace {
+ /// A cleanup to free the exception object if its initialization
+ /// throws.
+ struct FreeExceptionCleanup : EHScopeStack::Cleanup {
+ FreeExceptionCleanup(llvm::Value *ShouldFreeVar,
+ llvm::Value *ExnLocVar)
+ : ShouldFreeVar(ShouldFreeVar), ExnLocVar(ExnLocVar) {}
+
+ llvm::Value *ShouldFreeVar;
+ llvm::Value *ExnLocVar;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ llvm::BasicBlock *FreeBB = CGF.createBasicBlock("free-exnobj");
+ llvm::BasicBlock *DoneBB = CGF.createBasicBlock("free-exnobj.done");
+
+ llvm::Value *ShouldFree = CGF.Builder.CreateLoad(ShouldFreeVar,
+ "should-free-exnobj");
+ CGF.Builder.CreateCondBr(ShouldFree, FreeBB, DoneBB);
+ CGF.EmitBlock(FreeBB);
+ llvm::Value *ExnLocLocal = CGF.Builder.CreateLoad(ExnLocVar, "exnobj");
+ CGF.Builder.CreateCall(getFreeExceptionFn(CGF), ExnLocLocal)
+ ->setDoesNotThrow();
+ CGF.EmitBlock(DoneBB);
+ }
+ };
+}
+
+// Emits an exception expression into the given location. This
+// differs from EmitAnyExprToMem only in that, if a final copy-ctor
+// call is required, an exception within that copy ctor causes
+// std::terminate to be invoked.
+static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *E,
+ llvm::Value *ExnLoc) {
+ // We want to release the allocated exception object if this
+ // expression throws. We do this by pushing an EH-only cleanup
+ // block which, furthermore, deactivates itself after the expression
+ // is complete.
+ llvm::AllocaInst *ShouldFreeVar =
+ CGF.CreateTempAlloca(llvm::Type::getInt1Ty(CGF.getLLVMContext()),
+ "should-free-exnobj.var");
+ CGF.InitTempAlloca(ShouldFreeVar,
+ llvm::ConstantInt::getFalse(CGF.getLLVMContext()));
+
+ // A variable holding the exception pointer. This is necessary
+ // because the throw expression does not necessarily dominate the
+ // cleanup, for example if it appears in a conditional expression.
+ llvm::AllocaInst *ExnLocVar =
+ CGF.CreateTempAlloca(ExnLoc->getType(), "exnobj.var");
+
+ // Make sure the exception object is cleaned up if there's an
+ // exception during initialization.
+ // FIXME: stmt expressions might require this to be a normal
+ // cleanup, too.
+ CGF.EHStack.pushCleanup<FreeExceptionCleanup>(EHCleanup,
+ ShouldFreeVar,
+ ExnLocVar);
+ EHScopeStack::stable_iterator Cleanup = CGF.EHStack.stable_begin();
+
+ CGF.Builder.CreateStore(ExnLoc, ExnLocVar);
+ CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(CGF.getLLVMContext()),
+ ShouldFreeVar);
+
+ // __cxa_allocate_exception returns a void*; we need to cast this
+ // to the appropriate type for the object.
+ const llvm::Type *Ty = CGF.ConvertType(E->getType())->getPointerTo();
+ llvm::Value *TypedExnLoc = CGF.Builder.CreateBitCast(ExnLoc, Ty);
+
+ // FIXME: this isn't quite right! If there's a final unelided call
+ // to a copy constructor, then according to [except.terminate]p1 we
+ // must call std::terminate() if that constructor throws, because
+ // technically that copy occurs after the exception expression is
+ // evaluated but before the exception is caught. But the best way
+ // to handle that is to teach EmitAggExpr to do the final copy
+ // differently if it can't be elided.
+ CGF.EmitAnyExprToMem(E, TypedExnLoc, /*Volatile*/ false);
+
+ CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(CGF.getLLVMContext()),
+ ShouldFreeVar);
+
+ // Technically, the exception object is like a temporary; it has to
+ // be cleaned up when its full-expression is complete.
+ // Unfortunately, the AST represents full-expressions by creating a
+ // CXXExprWithTemporaries, which it only does when there are actually
+ // temporaries.
+ //
+ // If any cleanups have been added since we pushed ours, they must
+ // be from temporaries; this will get popped at the same time.
+ // Otherwise we need to pop ours off. FIXME: this is very brittle.
+ if (Cleanup == CGF.EHStack.stable_begin())
+ CGF.PopCleanupBlock();
+}
+
+llvm::Value *CodeGenFunction::getExceptionSlot() {
+ if (!ExceptionSlot) {
+ const llvm::Type *i8p = llvm::Type::getInt8PtrTy(getLLVMContext());
+ ExceptionSlot = CreateTempAlloca(i8p, "exn.slot");
+ }
+ return ExceptionSlot;
+}
+
+void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
+ if (!E->getSubExpr()) {
+ if (getInvokeDest()) {
+ Builder.CreateInvoke(getReThrowFn(*this),
+ getUnreachableBlock(),
+ getInvokeDest())
+ ->setDoesNotReturn();
+ } else {
+ Builder.CreateCall(getReThrowFn(*this))->setDoesNotReturn();
+ Builder.CreateUnreachable();
+ }
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ Builder.ClearInsertionPoint();
+ return;
+ }
+
+ QualType ThrowType = E->getSubExpr()->getType();
+
+ // Now allocate the exception object.
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
+
+ llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(*this);
+ llvm::CallInst *ExceptionPtr =
+ Builder.CreateCall(AllocExceptionFn,
+ llvm::ConstantInt::get(SizeTy, TypeSize),
+ "exception");
+ ExceptionPtr->setDoesNotThrow();
+
+ EmitAnyExprToExn(*this, E->getSubExpr(), ExceptionPtr);
+
+ // Now throw the exception.
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
+ llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType, true);
+
+ // The address of the destructor. If the exception type has a
+ // trivial destructor (or isn't a record), we just pass null.
+ llvm::Constant *Dtor = 0;
+ if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (!Record->hasTrivialDestructor()) {
+ CXXDestructorDecl *DtorD = Record->getDestructor();
+ Dtor = CGM.GetAddrOfCXXDestructor(DtorD, Dtor_Complete);
+ Dtor = llvm::ConstantExpr::getBitCast(Dtor, Int8PtrTy);
+ }
+ }
+ if (!Dtor) Dtor = llvm::Constant::getNullValue(Int8PtrTy);
+
+ if (getInvokeDest()) {
+ llvm::InvokeInst *ThrowCall =
+ Builder.CreateInvoke3(getThrowFn(*this),
+ getUnreachableBlock(), getInvokeDest(),
+ ExceptionPtr, TypeInfo, Dtor);
+ ThrowCall->setDoesNotReturn();
+ } else {
+ llvm::CallInst *ThrowCall =
+ Builder.CreateCall3(getThrowFn(*this), ExceptionPtr, TypeInfo, Dtor);
+ ThrowCall->setDoesNotReturn();
+ Builder.CreateUnreachable();
+ }
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ Builder.ClearInsertionPoint();
+
+ // FIXME: For now, emit a dummy basic block because expr emitters in general
+ // are not ready to handle emitting expressions at unreachable points.
+ EnsureInsertPoint();
+}
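+
+// A rough sketch of what the above emits for 'throw Widget(x);' (Widget and
+// x are illustrative names):
+//
+//   void *exn = __cxa_allocate_exception(sizeof(Widget));
+//   Widget::Widget((Widget *)exn, x);   // if this throws, the EH-only
+//                                       // cleanup calls __cxa_free_exception
+//   __cxa_throw(exn, &typeid(Widget), <Widget dtor or null>);
+//
+// The destructor argument is null when the thrown type is trivially
+// destructible, and __cxa_throw becomes an invoke whenever a landing pad is
+// required.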
+
+void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
+ if (!Exceptions)
+ return;
+
+ const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (FD == 0)
+ return;
+ const FunctionProtoType *Proto = FD->getType()->getAs<FunctionProtoType>();
+ if (Proto == 0)
+ return;
+
+ assert(!Proto->hasAnyExceptionSpec() && "function with parameter pack");
+
+ if (!Proto->hasExceptionSpec())
+ return;
+
+ unsigned NumExceptions = Proto->getNumExceptions();
+ EHFilterScope *Filter = EHStack.pushFilter(NumExceptions);
+
+ for (unsigned I = 0; I != NumExceptions; ++I) {
+ QualType Ty = Proto->getExceptionType(I);
+ QualType ExceptType = Ty.getNonReferenceType().getUnqualifiedType();
+ llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType, true);
+ Filter->setFilter(I, EHType);
+ }
+}
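+
+// For example (illustrative types), 'void f() throw(A, B)' pushes a filter
+// scope holding the RTTI descriptors for A and B; if an exception of any
+// other type tries to unwind out of f, the personality routine selects the
+// filter and __cxa_call_unexpected ends up being invoked (see the filter
+// handling in EmitLandingPad below).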
+
+void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
+ if (!Exceptions)
+ return;
+
+ const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (FD == 0)
+ return;
+ const FunctionProtoType *Proto = FD->getType()->getAs<FunctionProtoType>();
+ if (Proto == 0)
+ return;
+
+ if (!Proto->hasExceptionSpec())
+ return;
+
+ EHStack.popFilter();
+}
+
+void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) {
+ EnterCXXTryStmt(S);
+ EmitStmt(S.getTryBlock());
+ ExitCXXTryStmt(S);
+}
+
+void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
+ unsigned NumHandlers = S.getNumHandlers();
+ EHCatchScope *CatchScope = EHStack.pushCatch(NumHandlers);
+
+ for (unsigned I = 0; I != NumHandlers; ++I) {
+ const CXXCatchStmt *C = S.getHandler(I);
+
+ llvm::BasicBlock *Handler = createBasicBlock("catch");
+ if (C->getExceptionDecl()) {
+ // FIXME: Dropping the reference type on the type info makes it
+ // impossible to correctly implement catch-by-reference
+ // semantics for pointers. Unfortunately, this is what all
+ // existing compilers do, and it's not clear that the standard
+ // personality routine is capable of doing this right. See C++ DR 388:
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#388
+ QualType CaughtType = C->getCaughtType();
+ CaughtType = CaughtType.getNonReferenceType().getUnqualifiedType();
+
+ llvm::Value *TypeInfo = 0;
+ if (CaughtType->isObjCObjectPointerType())
+ TypeInfo = CGM.getObjCRuntime().GetEHType(CaughtType);
+ else
+ TypeInfo = CGM.GetAddrOfRTTIDescriptor(CaughtType, true);
+ CatchScope->setHandler(I, TypeInfo, Handler);
+ } else {
+ // No exception decl indicates '...', a catch-all.
+ CatchScope->setCatchAllHandler(I, Handler);
+ }
+ }
+}
+
+/// Check whether this is a non-EH scope, i.e. a scope which doesn't
+/// affect exception handling. Currently, the only non-EH scopes are
+/// normal-only cleanup scopes.
+static bool isNonEHScope(const EHScope &S) {
+ switch (S.getKind()) {
+ case EHScope::Cleanup:
+ return !cast<EHCleanupScope>(S).isEHCleanup();
+ case EHScope::Filter:
+ case EHScope::Catch:
+ case EHScope::Terminate:
+ return false;
+ }
+
+ // Suppress warning.
+ return false;
+}
+
+llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
+ assert(EHStack.requiresLandingPad());
+ assert(!EHStack.empty());
+
+ if (!Exceptions)
+ return 0;
+
+ // Check the innermost scope for a cached landing pad. If this is
+ // a non-EH cleanup, we'll check enclosing scopes in EmitLandingPad.
+ llvm::BasicBlock *LP = EHStack.begin()->getCachedLandingPad();
+ if (LP) return LP;
+
+ // Build the landing pad for this scope.
+ LP = EmitLandingPad();
+ assert(LP);
+
+ // Cache the landing pad on the innermost scope. If this is a
+ // non-EH scope, cache the landing pad on the enclosing scope, too.
+ for (EHScopeStack::iterator ir = EHStack.begin(); true; ++ir) {
+ ir->setCachedLandingPad(LP);
+ if (!isNonEHScope(*ir)) break;
+ }
+
+ return LP;
+}
+
+llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
+ assert(EHStack.requiresLandingPad());
+
+ // This function contains a hack to work around a design flaw in
+ // LLVM's EH IR which breaks semantics after inlining. This same
+ // hack is implemented in llvm-gcc.
+ //
+ // The LLVM EH abstraction is basically a thin veneer over the
+ // traditional GCC zero-cost design: for each range of instructions
+ // in the function, there is (at most) one "landing pad" with an
+ // associated chain of EH actions. A language-specific personality
+ // function interprets this chain of actions and (1) decides whether
+ // or not to resume execution at the landing pad and (2) if so,
+ // provides an integer indicating why it's stopping. In LLVM IR,
+ // the association of a landing pad with a range of instructions is
+ // achieved via an invoke instruction, the chain of actions becomes
+ // the arguments to the @llvm.eh.selector call, and the selector
+ // call returns the integer indicator. Other than the required
+ // presence of two intrinsic function calls in the landing pad,
+ // the IR exactly describes the layout of the output code.
+ //
+ // A principal advantage of this design is that it is completely
+ // language-agnostic; in theory, the LLVM optimizers can treat
+ // landing pads neutrally, and targets need only know how to lower
+ // the intrinsics to have a functioning exceptions system (assuming
+ // that platform exceptions follow something approximately like the
+ // GCC design). Unfortunately, landing pads cannot be combined in a
+ // language-agnostic way: given selectors A and B, there is no way
+ // to make a single landing pad which faithfully represents the
+ // semantics of propagating an exception first through A, then
+ // through B, without knowing how the personality will interpret the
+ // (lowered form of the) selectors. This means that inlining has no
+ // choice but to crudely chain invokes (i.e., to ignore invokes in
+ // the inlined function, but to turn all unwindable calls into
+ // invokes), which is only semantically valid if every unwind stops
+ // at every landing pad.
+ //
+ // Therefore, the invoke-inline hack is to guarantee that every
+ // landing pad has a catch-all.
+ const bool UseInvokeInlineHack = true;
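+
+ // For reference (names purely illustrative), a landing pad in this era of
+ // LLVM IR is a pair of intrinsic calls roughly like:
+ //
+ //   %exn = call i8* @llvm.eh.exception()
+ //   %sel = call i32 (i8*, i8*, ...)* @llvm.eh.selector(
+ //              i8* %exn,
+ //              i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*),
+ //              i8* bitcast (i8** @_ZTI1A to i8*),   ; catch A
+ //              i8* null)                            ; catch-all
+ //
+ // The invoke-inline hack amounts to always ending that argument list with
+ // the catch-all marker.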
+
+ for (EHScopeStack::iterator ir = EHStack.begin(); ; ) {
+ assert(ir != EHStack.end() &&
+ "stack requiring landing pad is nothing but non-EH scopes?");
+
+ // If this is a terminate scope, just use the singleton terminate
+ // landing pad.
+ if (isa<EHTerminateScope>(*ir))
+ return getTerminateLandingPad();
+
+ // If this isn't an EH scope, iterate; otherwise break out.
+ if (!isNonEHScope(*ir)) break;
+ ++ir;
+
+ // We haven't checked this scope for a cached landing pad yet.
+ if (llvm::BasicBlock *LP = ir->getCachedLandingPad())
+ return LP;
+ }
+
+ // Save the current IR generation state.
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ const EHPersonality &Personality =
+ EHPersonality::get(CGM.getLangOptions());
+
+ // Create and configure the landing pad.
+ llvm::BasicBlock *LP = createBasicBlock("lpad");
+ EmitBlock(LP);
+
+ // Save the exception pointer. It's safe to use a single exception
+ // pointer per function because EH cleanups can never have nested
+ // try/catches.
+ llvm::CallInst *Exn =
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_exception), "exn");
+ Exn->setDoesNotThrow();
+ Builder.CreateStore(Exn, getExceptionSlot());
+
+ // Build the selector arguments.
+ llvm::SmallVector<llvm::Value*, 8> EHSelector;
+ EHSelector.push_back(Exn);
+ EHSelector.push_back(getPersonalityFn(*this, Personality));
+
+ // Accumulate all the handlers in scope.
+ llvm::DenseMap<llvm::Value*, UnwindDest> EHHandlers;
+ UnwindDest CatchAll;
+ bool HasEHCleanup = false;
+ bool HasEHFilter = false;
+ llvm::SmallVector<llvm::Value*, 8> EHFilters;
+ for (EHScopeStack::iterator I = EHStack.begin(), E = EHStack.end();
+ I != E; ++I) {
+
+ switch (I->getKind()) {
+ case EHScope::Cleanup:
+ if (!HasEHCleanup)
+ HasEHCleanup = cast<EHCleanupScope>(*I).isEHCleanup();
+ // We otherwise don't care about cleanups.
+ continue;
+
+ case EHScope::Filter: {
+ assert(I.next() == EHStack.end() && "EH filter is not end of EH stack");
+ assert(!CatchAll.isValid() && "EH filter reached after catch-all");
+
+ // Filter scopes get added to the selector in weird ways.
+ EHFilterScope &Filter = cast<EHFilterScope>(*I);
+ HasEHFilter = true;
+
+ // Add all the filter values which we aren't already explicitly
+ // catching.
+ for (unsigned I = 0, E = Filter.getNumFilters(); I != E; ++I) {
+ llvm::Value *FV = Filter.getFilter(I);
+ if (!EHHandlers.count(FV))
+ EHFilters.push_back(FV);
+ }
+ goto done;
+ }
+
+ case EHScope::Terminate:
+ // Terminate scopes are basically catch-alls.
+ assert(!CatchAll.isValid());
+ CatchAll = UnwindDest(getTerminateHandler(),
+ EHStack.getEnclosingEHCleanup(I),
+ cast<EHTerminateScope>(*I).getDestIndex());
+ goto done;
+
+ case EHScope::Catch:
+ break;
+ }
+
+ EHCatchScope &Catch = cast<EHCatchScope>(*I);
+ for (unsigned HI = 0, HE = Catch.getNumHandlers(); HI != HE; ++HI) {
+ EHCatchScope::Handler Handler = Catch.getHandler(HI);
+
+ // Catch-all. We should only have one of these per catch.
+ if (!Handler.Type) {
+ assert(!CatchAll.isValid());
+ CatchAll = UnwindDest(Handler.Block,
+ EHStack.getEnclosingEHCleanup(I),
+ Handler.Index);
+ continue;
+ }
+
+ // Check whether we already have a handler for this type.
+ UnwindDest &Dest = EHHandlers[Handler.Type];
+ if (Dest.isValid()) continue;
+
+ EHSelector.push_back(Handler.Type);
+ Dest = UnwindDest(Handler.Block,
+ EHStack.getEnclosingEHCleanup(I),
+ Handler.Index);
+ }
+
+ // Stop if we found a catch-all.
+ if (CatchAll.isValid()) break;
+ }
+
+ done:
+ unsigned LastToEmitInLoop = EHSelector.size();
+
+ // If we have a catch-all, add null to the selector.
+ if (CatchAll.isValid()) {
+ EHSelector.push_back(getCatchAllValue(*this));
+
+ // If we have an EH filter, we need to add those handlers in the
+ // right place in the selector, which is to say, at the end.
+ } else if (HasEHFilter) {
+ // Create a filter expression: an integer constant saying how many
+ // filters there are (+1 to avoid ambiguity with 0 for cleanup),
+ // followed by the filter types. The personality routine only
+ // lands here if the filter doesn't match.
+ EHSelector.push_back(llvm::ConstantInt::get(Builder.getInt32Ty(),
+ EHFilters.size() + 1));
+ EHSelector.append(EHFilters.begin(), EHFilters.end());
+
+ // Also check whether we need a cleanup.
+ if (UseInvokeInlineHack || HasEHCleanup)
+ EHSelector.push_back(UseInvokeInlineHack
+ ? getCatchAllValue(*this)
+ : getCleanupValue(*this));
+
+ // Otherwise, signal that we at least have cleanups.
+ } else if (UseInvokeInlineHack || HasEHCleanup) {
+ EHSelector.push_back(UseInvokeInlineHack
+ ? getCatchAllValue(*this)
+ : getCleanupValue(*this));
+ } else {
+ assert(LastToEmitInLoop > 2);
+ LastToEmitInLoop--;
+ }
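+
+ // For instance (illustrative types), 'throw(A, B)' plus an EH cleanup ends
+ // the selector with: i32 3, @_ZTI1A, @_ZTI1B, <catch-all/cleanup marker>;
+ // the 3 is NumFilters + 1, so a lone cleanup marker (i32 0) can never be
+ // mistaken for an empty filter.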
+
+ assert(EHSelector.size() >= 3 && "selector call has only two arguments!");
+
+ // Tell the backend how to generate the landing pad.
+ llvm::CallInst *Selection =
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_selector),
+ EHSelector.begin(), EHSelector.end(), "eh.selector");
+ Selection->setDoesNotThrow();
+
+ // Select the right handler.
+ llvm::Value *llvm_eh_typeid_for =
+ CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
+
+ // The results of llvm_eh_typeid_for aren't reliable --- at least
+ // not locally --- so we basically have to do this as an 'if' chain.
+ // We walk through the first N-1 catch clauses, testing and chaining,
+ // and then fall into the final clause (which is either a cleanup, a
+ // filter (possibly with a cleanup), a catch-all, or another catch).
+ for (unsigned I = 2; I != LastToEmitInLoop; ++I) {
+ llvm::Value *Type = EHSelector[I];
+ UnwindDest Dest = EHHandlers[Type];
+ assert(Dest.isValid() && "no handler entry for value in selector?");
+
+ // Figure out where to branch on a match. As a debug code-size
+ // optimization, if the scope depth matches the innermost cleanup,
+ // we branch directly to the catch handler.
+ llvm::BasicBlock *Match = Dest.getBlock();
+ bool MatchNeedsCleanup =
+ Dest.getScopeDepth() != EHStack.getInnermostEHCleanup();
+ if (MatchNeedsCleanup)
+ Match = createBasicBlock("eh.match");
+
+ llvm::BasicBlock *Next = createBasicBlock("eh.next");
+
+ // Check whether the exception matches.
+ llvm::CallInst *Id
+ = Builder.CreateCall(llvm_eh_typeid_for,
+ Builder.CreateBitCast(Type, CGM.PtrToInt8Ty));
+ Id->setDoesNotThrow();
+ Builder.CreateCondBr(Builder.CreateICmpEQ(Selection, Id),
+ Match, Next);
+
+ // Emit match code if necessary.
+ if (MatchNeedsCleanup) {
+ EmitBlock(Match);
+ EmitBranchThroughEHCleanup(Dest);
+ }
+
+ // Continue to the next match.
+ EmitBlock(Next);
+ }
+
+ // Emit the final case in the selector.
+ // This might be a catch-all....
+ if (CatchAll.isValid()) {
+ assert(isa<llvm::ConstantPointerNull>(EHSelector.back()));
+ EmitBranchThroughEHCleanup(CatchAll);
+
+ // ...or an EH filter...
+ } else if (HasEHFilter) {
+ llvm::Value *SavedSelection = Selection;
+
+ // First, unwind out to the outermost scope if necessary.
+ if (EHStack.hasEHCleanups()) {
+ // The end here might not dominate the beginning, so we might need to
+ // save the selector if we need it.
+ llvm::AllocaInst *SelectorVar = 0;
+ if (HasEHCleanup) {
+ SelectorVar = CreateTempAlloca(Builder.getInt32Ty(), "selector.var");
+ Builder.CreateStore(Selection, SelectorVar);
+ }
+
+ llvm::BasicBlock *CleanupContBB = createBasicBlock("ehspec.cleanup.cont");
+ EmitBranchThroughEHCleanup(UnwindDest(CleanupContBB, EHStack.stable_end(),
+ EHStack.getNextEHDestIndex()));
+ EmitBlock(CleanupContBB);
+
+ if (HasEHCleanup)
+ SavedSelection = Builder.CreateLoad(SelectorVar, "ehspec.saved-selector");
+ }
+
+ // If there was a cleanup, we'll need to actually check whether we
+ // landed here because the filter triggered.
+ if (UseInvokeInlineHack || HasEHCleanup) {
+ llvm::BasicBlock *RethrowBB = createBasicBlock("cleanup");
+ llvm::BasicBlock *UnexpectedBB = createBasicBlock("ehspec.unexpected");
+
+ llvm::Constant *Zero = llvm::ConstantInt::get(Builder.getInt32Ty(), 0);
+ llvm::Value *FailsFilter =
+ Builder.CreateICmpSLT(SavedSelection, Zero, "ehspec.fails");
+ Builder.CreateCondBr(FailsFilter, UnexpectedBB, RethrowBB);
+
+ // The rethrow block is where we land if this was a cleanup.
+ // TODO: can this be _Unwind_Resume if the InvokeInlineHack is off?
+ EmitBlock(RethrowBB);
+ Builder.CreateCall(getUnwindResumeOrRethrowFn(),
+ Builder.CreateLoad(getExceptionSlot()))
+ ->setDoesNotReturn();
+ Builder.CreateUnreachable();
+
+ EmitBlock(UnexpectedBB);
+ }
+
+ // Call __cxa_call_unexpected. This doesn't need to be an invoke
+ // because __cxa_call_unexpected magically filters exceptions
+ // according to the last landing pad the exception was thrown
+ // into. Seriously.
+ Builder.CreateCall(getUnexpectedFn(*this),
+ Builder.CreateLoad(getExceptionSlot()))
+ ->setDoesNotReturn();
+ Builder.CreateUnreachable();
+
+ // ...or a normal catch handler...
+ } else if (!UseInvokeInlineHack && !HasEHCleanup) {
+ llvm::Value *Type = EHSelector.back();
+ EmitBranchThroughEHCleanup(EHHandlers[Type]);
+
+ // ...or a cleanup.
+ } else {
+ EmitBranchThroughEHCleanup(getRethrowDest());
+ }
+
+ // Restore the old IR generation state.
+ Builder.restoreIP(SavedIP);
+
+ return LP;
+}
+
+namespace {
+ /// A cleanup to call __cxa_end_catch. In many cases, the caught
+ /// exception type lets us state definitively that the thrown exception
+ /// type does not have a destructor. In particular:
+ /// - Catch-alls tell us nothing, so we have to conservatively
+ /// assume that the thrown exception might have a destructor.
+ /// - Catches by reference behave according to their base types.
+ /// - Catches of non-record types will only trigger for exceptions
+ /// of non-record types, which never have destructors.
+ /// - Catches of record types can trigger for arbitrary subclasses
+ /// of the caught type, so we have to assume the actual thrown
+ /// exception type might have a throwing destructor, even if the
+ /// caught type's destructor is trivial or nothrow.
+ struct CallEndCatch : EHScopeStack::Cleanup {
+ CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
+ bool MightThrow;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ if (!MightThrow) {
+ CGF.Builder.CreateCall(getEndCatchFn(CGF))->setDoesNotThrow();
+ return;
+ }
+
+ CGF.EmitCallOrInvoke(getEndCatchFn(CGF), 0, 0);
+ }
+ };
+}
+
+/// Emits a call to __cxa_begin_catch and enters a cleanup to call
+/// __cxa_end_catch.
+///
+/// \param EndMightThrow - true if __cxa_end_catch might throw
+static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
+ llvm::Value *Exn,
+ bool EndMightThrow) {
+ llvm::CallInst *Call = CGF.Builder.CreateCall(getBeginCatchFn(CGF), Exn);
+ Call->setDoesNotThrow();
+
+ CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
+
+ return Call;
+}
+
+/// A "special initializer" callback for initializing a catch
+/// parameter during catch initialization.
+static void InitCatchParam(CodeGenFunction &CGF,
+ const VarDecl &CatchParam,
+ llvm::Value *ParamAddr) {
+ // Load the exception from where the landing pad saved it.
+ llvm::Value *Exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot(), "exn");
+
+ CanQualType CatchType =
+ CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
+ const llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
+
+ // If we're catching by reference, we can just cast the object
+ // pointer to the appropriate pointer.
+ if (isa<ReferenceType>(CatchType)) {
+ QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
+ bool EndCatchMightThrow = CaughtType->isRecordType();
+
+ // __cxa_begin_catch returns the adjusted object pointer.
+ llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
+
+ // We have no way to tell the personality function that we're
+ // catching by reference, so if we're catching a pointer,
+ // __cxa_begin_catch will actually return that pointer by value.
+ if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
+ QualType PointeeType = PT->getPointeeType();
+
+ // When catching by reference, generally we should just ignore
+ // this by-value pointer and use the exception object instead.
+ if (!PointeeType->isRecordType()) {
+
+ // Exn points to the struct _Unwind_Exception header, which
+ // we have to skip past in order to reach the exception data.
+ unsigned HeaderSize =
+ CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
+ AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);
+
+ // However, if we're catching a pointer-to-record type, that won't
+ // work, because the personality function might have adjusted
+ // the pointer. There's actually no way for us to fully satisfy
+ // the language/ABI contract here: we can't use Exn because it
+ // might have the wrong adjustment, but we can't use the by-value
+ // pointer because it's off by a level of abstraction.
+ //
+ // The current solution is to dump the adjusted pointer into an
+ // alloca, which breaks language semantics (because changing the
+ // pointer doesn't change the exception) but at least works.
+ // The better solution would be to filter out non-exact matches
+ // and rethrow them, but this is tricky because the rethrow
+ // really needs to be catchable by other sites at this landing
+ // pad. The best solution is to fix the personality function.
+ } else {
+ // Pull the pointer for the reference type off.
+ const llvm::Type *PtrTy =
+ cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
+
+ // Create the temporary and write the adjusted pointer into it.
+ llvm::Value *ExnPtrTmp = CGF.CreateTempAlloca(PtrTy, "exn.byref.tmp");
+ llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
+ CGF.Builder.CreateStore(Casted, ExnPtrTmp);
+
+ // Bind the reference to the temporary.
+ AdjustedExn = ExnPtrTmp;
+ }
+ }
+
+ llvm::Value *ExnCast =
+ CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
+ CGF.Builder.CreateStore(ExnCast, ParamAddr);
+ return;
+ }
+
+ // Non-aggregates (plus complexes).
+ bool IsComplex = false;
+ if (!CGF.hasAggregateLLVMType(CatchType) ||
+ (IsComplex = CatchType->isAnyComplexType())) {
+ llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
+
+ // If the catch type is a pointer type, __cxa_begin_catch returns
+ // the pointer by value.
+ if (CatchType->hasPointerRepresentation()) {
+ llvm::Value *CastExn =
+ CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
+ CGF.Builder.CreateStore(CastExn, ParamAddr);
+ return;
+ }
+
+ // Otherwise, it returns a pointer into the exception object.
+
+ const llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
+ llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
+
+ if (IsComplex) {
+ CGF.StoreComplexToAddr(CGF.LoadComplexFromAddr(Cast, /*volatile*/ false),
+ ParamAddr, /*volatile*/ false);
+ } else {
+ unsigned Alignment =
+ CGF.getContext().getDeclAlign(&CatchParam).getQuantity();
+ llvm::Value *ExnLoad = CGF.Builder.CreateLoad(Cast, "exn.scalar");
+ CGF.EmitStoreOfScalar(ExnLoad, ParamAddr, /*volatile*/ false, Alignment,
+ CatchType);
+ }
+ return;
+ }
+
+ // FIXME: this *really* needs to be done via a proper, Sema-emitted
+ // initializer expression.
+
+ CXXRecordDecl *RD = CatchType.getTypePtr()->getAsCXXRecordDecl();
+ assert(RD && "aggregate catch type was not a record!");
+
+ const llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
+
+ if (RD->hasTrivialCopyConstructor()) {
+ llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, true);
+ llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
+ CGF.EmitAggregateCopy(ParamAddr, Cast, CatchType);
+ return;
+ }
+
+ // We have to call __cxa_get_exception_ptr to get the adjusted
+ // pointer before copying.
+ llvm::CallInst *AdjustedExn =
+ CGF.Builder.CreateCall(getGetExceptionPtrFn(CGF), Exn);
+ AdjustedExn->setDoesNotThrow();
+ llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
+
+ CXXConstructorDecl *CD = RD->getCopyConstructor(CGF.getContext(), 0);
+ assert(CD && "record has no copy constructor!");
+ llvm::Value *CopyCtor = CGF.CGM.GetAddrOfCXXConstructor(CD, Ctor_Complete);
+
+ CallArgList CallArgs;
+ CallArgs.push_back(std::make_pair(RValue::get(ParamAddr),
+ CD->getThisType(CGF.getContext())));
+ CallArgs.push_back(std::make_pair(RValue::get(Cast),
+ CD->getParamDecl(0)->getType()));
+
+ const FunctionProtoType *FPT
+ = CD->getType()->getAs<FunctionProtoType>();
+
+ // Call the copy ctor in a terminate scope.
+ CGF.EHStack.pushTerminate();
+ CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(CallArgs, FPT),
+ CopyCtor, ReturnValueSlot(), CallArgs, CD);
+ CGF.EHStack.popTerminate();
+
+ // Finally we can call __cxa_begin_catch.
+ CallBeginCatch(CGF, Exn, true);
+}
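+
+// Summarizing the non-trivial tail of InitCatchParam: for 'catch (Widget w)'
+// where Widget has a non-trivial copy constructor (illustrative names), the
+// sequence is roughly:
+//
+//   void *adj = __cxa_get_exception_ptr(exn);  // adjusted object pointer
+//   Widget::Widget(&w, *(Widget *)adj);        // copy runs inside a
+//                                              // terminate scope
+//   __cxa_begin_catch(exn);                    // handler becomes active
+//
+// with a cleanup to call __cxa_end_catch pushed by CallBeginCatch.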
+
+/// Begins a catch statement by initializing the catch variable and
+/// calling __cxa_begin_catch.
+static void BeginCatch(CodeGenFunction &CGF,
+ const CXXCatchStmt *S) {
+ // We have to be very careful with the ordering of cleanups here:
+ // C++ [except.throw]p4:
+ // The destruction [of the exception temporary] occurs
+ // immediately after the destruction of the object declared in
+ // the exception-declaration in the handler.
+ //
+ // So the precise ordering is:
+ // 1. Construct catch variable.
+ // 2. __cxa_begin_catch
+ // 3. Enter __cxa_end_catch cleanup
+ // 4. Enter dtor cleanup
+ //
+ // We do this by initializing the exception variable with a
+ // "special initializer", InitCatchParam. Delegation sequence:
+ // - ExitCXXTryStmt opens a RunCleanupsScope
+ // - EmitLocalBlockVarDecl creates the variable and debug info
+ // - InitCatchParam initializes the variable from the exception
+ // - CallBeginCatch calls __cxa_begin_catch
+ // - CallBeginCatch enters the __cxa_end_catch cleanup
+ // - EmitLocalBlockVarDecl enters the variable destructor cleanup
+ // - EmitCXXTryStmt emits the code for the catch body
+ // - EmitCXXTryStmt closes the RunCleanupsScope
+
+ VarDecl *CatchParam = S->getExceptionDecl();
+ if (!CatchParam) {
+ llvm::Value *Exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot(), "exn");
+ CallBeginCatch(CGF, Exn, true);
+ return;
+ }
+
+ // Emit the local.
+ CGF.EmitLocalBlockVarDecl(*CatchParam, &InitCatchParam);
+}
+
+namespace {
+ struct CallRethrow : EHScopeStack::Cleanup {
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ CGF.EmitCallOrInvoke(getReThrowFn(CGF), 0, 0);
+ }
+ };
+}
+
+void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
+ unsigned NumHandlers = S.getNumHandlers();
+ EHCatchScope &CatchScope = cast<EHCatchScope>(*EHStack.begin());
+ assert(CatchScope.getNumHandlers() == NumHandlers);
+
+ // Copy the handler blocks off before we pop the EH stack. Emitting
+ // the handlers might scribble on this memory.
+ llvm::SmallVector<EHCatchScope::Handler, 8> Handlers(NumHandlers);
+ memcpy(Handlers.data(), CatchScope.begin(),
+ NumHandlers * sizeof(EHCatchScope::Handler));
+ EHStack.popCatch();
+
+ // The fall-through block.
+ llvm::BasicBlock *ContBB = createBasicBlock("try.cont");
+
+ // We just emitted the body of the try; jump to the continue block.
+ if (HaveInsertPoint())
+ Builder.CreateBr(ContBB);
+
+ // Determine if we need an implicit rethrow for all these catch handlers.
+ bool ImplicitRethrow = false;
+ if (IsFnTryBlock)
+ ImplicitRethrow = isa<CXXDestructorDecl>(CurCodeDecl) ||
+ isa<CXXConstructorDecl>(CurCodeDecl);
+
+ for (unsigned I = 0; I != NumHandlers; ++I) {
+ llvm::BasicBlock *CatchBlock = Handlers[I].Block;
+ EmitBlock(CatchBlock);
+
+ // Catch the exception if this isn't a catch-all.
+ const CXXCatchStmt *C = S.getHandler(I);
+
+ // Enter a cleanup scope, including the catch variable and the
+ // end-catch.
+ RunCleanupsScope CatchScope(*this);
+
+ // Initialize the catch variable and set up the cleanups.
+ BeginCatch(*this, C);
+
+ // If there's an implicit rethrow, push a normal "cleanup" to call
+ // __cxa_rethrow. This needs to happen before __cxa_end_catch is
+ // called, and so it is pushed after BeginCatch.
+ if (ImplicitRethrow)
+ EHStack.pushCleanup<CallRethrow>(NormalCleanup);
+
+ // Perform the body of the catch.
+ EmitStmt(C->getHandlerBlock());
+
+ // Fall out through the catch cleanups.
+ CatchScope.ForceCleanup();
+
+ // Branch out of the try.
+ if (HaveInsertPoint())
+ Builder.CreateBr(ContBB);
+ }
+
+ EmitBlock(ContBB);
+}
+
+namespace {
+ struct CallEndCatchForFinally : EHScopeStack::Cleanup {
+ llvm::Value *ForEHVar;
+ llvm::Value *EndCatchFn;
+ CallEndCatchForFinally(llvm::Value *ForEHVar, llvm::Value *EndCatchFn)
+ : ForEHVar(ForEHVar), EndCatchFn(EndCatchFn) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ llvm::BasicBlock *EndCatchBB = CGF.createBasicBlock("finally.endcatch");
+ llvm::BasicBlock *CleanupContBB =
+ CGF.createBasicBlock("finally.cleanup.cont");
+
+ llvm::Value *ShouldEndCatch =
+ CGF.Builder.CreateLoad(ForEHVar, "finally.endcatch");
+ CGF.Builder.CreateCondBr(ShouldEndCatch, EndCatchBB, CleanupContBB);
+ CGF.EmitBlock(EndCatchBB);
+ CGF.EmitCallOrInvoke(EndCatchFn, 0, 0); // catch-all, so might throw
+ CGF.EmitBlock(CleanupContBB);
+ }
+ };
+
+ struct PerformFinally : EHScopeStack::Cleanup {
+ const Stmt *Body;
+ llvm::Value *ForEHVar;
+ llvm::Value *EndCatchFn;
+ llvm::Value *RethrowFn;
+ llvm::Value *SavedExnVar;
+
+ PerformFinally(const Stmt *Body, llvm::Value *ForEHVar,
+ llvm::Value *EndCatchFn,
+ llvm::Value *RethrowFn, llvm::Value *SavedExnVar)
+ : Body(Body), ForEHVar(ForEHVar), EndCatchFn(EndCatchFn),
+ RethrowFn(RethrowFn), SavedExnVar(SavedExnVar) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ // Enter a cleanup to call the end-catch function if one was provided.
+ if (EndCatchFn)
+ CGF.EHStack.pushCleanup<CallEndCatchForFinally>(NormalAndEHCleanup,
+ ForEHVar, EndCatchFn);
+
+ // Save the current cleanup destination in case there are
+ // cleanups in the finally block.
+ llvm::Value *SavedCleanupDest =
+ CGF.Builder.CreateLoad(CGF.getNormalCleanupDestSlot(),
+ "cleanup.dest.saved");
+
+ // Emit the finally block.
+ CGF.EmitStmt(Body);
+
+ // If the end of the finally is reachable, check whether this was
+ // for EH. If so, rethrow.
+ if (CGF.HaveInsertPoint()) {
+ llvm::BasicBlock *RethrowBB = CGF.createBasicBlock("finally.rethrow");
+ llvm::BasicBlock *ContBB = CGF.createBasicBlock("finally.cont");
+
+ llvm::Value *ShouldRethrow =
+ CGF.Builder.CreateLoad(ForEHVar, "finally.shouldthrow");
+ CGF.Builder.CreateCondBr(ShouldRethrow, RethrowBB, ContBB);
+
+ CGF.EmitBlock(RethrowBB);
+ if (SavedExnVar) {
+ llvm::Value *Args[] = { CGF.Builder.CreateLoad(SavedExnVar) };
+ CGF.EmitCallOrInvoke(RethrowFn, Args, Args+1);
+ } else {
+ CGF.EmitCallOrInvoke(RethrowFn, 0, 0);
+ }
+ CGF.Builder.CreateUnreachable();
+
+ CGF.EmitBlock(ContBB);
+
+ // Restore the cleanup destination.
+ CGF.Builder.CreateStore(SavedCleanupDest,
+ CGF.getNormalCleanupDestSlot());
+ }
+
+ // Leave the end-catch cleanup. As an optimization, pretend that
+ // the fallthrough path was inaccessible; we've dynamically proven
+ // that we're not in the EH case along that path.
+ if (EndCatchFn) {
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
+ CGF.PopCleanupBlock();
+ CGF.Builder.restoreIP(SavedIP);
+ }
+
+ // Now make sure we actually have an insertion point or the
+ // cleanup gods will hate us.
+ CGF.EnsureInsertPoint();
+ }
+ };
+}
+
+/// Enters a finally block for an implementation using zero-cost
+/// exceptions. This is mostly general, but hard-codes some
+/// language/ABI-specific behavior in the catch-all sections.
+CodeGenFunction::FinallyInfo
+CodeGenFunction::EnterFinallyBlock(const Stmt *Body,
+ llvm::Constant *BeginCatchFn,
+ llvm::Constant *EndCatchFn,
+ llvm::Constant *RethrowFn) {
+ assert((BeginCatchFn != 0) == (EndCatchFn != 0) &&
+ "begin/end catch functions not paired");
+ assert(RethrowFn && "rethrow function is required");
+
+ // The rethrow function has one of the following two types:
+ // void (*)()
+ // void (*)(void*)
+ // In the latter case we need to pass it the exception object.
+ // But we can't use the exception slot because the @finally might
+ // have a landing pad (which would overwrite the exception slot).
+ const llvm::FunctionType *RethrowFnTy =
+ cast<llvm::FunctionType>(
+ cast<llvm::PointerType>(RethrowFn->getType())
+ ->getElementType());
+ llvm::Value *SavedExnVar = 0;
+ if (RethrowFnTy->getNumParams())
+ SavedExnVar = CreateTempAlloca(Builder.getInt8PtrTy(), "finally.exn");
+
+ // A finally block is a statement which must be executed on any edge
+ // out of a given scope. Unlike a cleanup, the finally block may
+ // contain arbitrary control flow leading out of itself. In
+ // addition, finally blocks should always be executed, even if there
+ // are no catch handlers higher on the stack. Therefore, we
+ // surround the protected scope with a combination of a normal
+ // cleanup (to catch attempts to break out of the block via normal
+ // control flow) and an EH catch-all (semantically "outside" any try
+ // statement to which the finally block might have been attached).
+ // The finally block itself is generated in the context of a cleanup
+ // which conditionally leaves the catch-all.
+
+ FinallyInfo Info;
+
+ // Jump destination for performing the finally block on an exception
+ // edge. We'll never actually reach this block, so unreachable is
+ // fine.
+ JumpDest RethrowDest = getJumpDestInCurrentScope(getUnreachableBlock());
+
+ // Whether the finally block is being executed for EH purposes.
+ llvm::AllocaInst *ForEHVar = CreateTempAlloca(Builder.getInt1Ty(),
+ "finally.for-eh");
+ InitTempAlloca(ForEHVar, llvm::ConstantInt::getFalse(getLLVMContext()));
+
+ // Enter a normal cleanup which will perform the @finally block.
+ EHStack.pushCleanup<PerformFinally>(NormalCleanup, Body,
+ ForEHVar, EndCatchFn,
+ RethrowFn, SavedExnVar);
+
+ // Enter a catch-all scope.
+ llvm::BasicBlock *CatchAllBB = createBasicBlock("finally.catchall");
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveIP();
+ Builder.SetInsertPoint(CatchAllBB);
+
+ // If there's a begin-catch function, call it.
+ if (BeginCatchFn) {
+ Builder.CreateCall(BeginCatchFn, Builder.CreateLoad(getExceptionSlot()))
+ ->setDoesNotThrow();
+ }
+
+ // If we need to remember the exception pointer to rethrow later, do so.
+ if (SavedExnVar) {
+ llvm::Value *SavedExn = Builder.CreateLoad(getExceptionSlot());
+ Builder.CreateStore(SavedExn, SavedExnVar);
+ }
+
+ // Tell the finally block that we're in EH.
+ Builder.CreateStore(llvm::ConstantInt::getTrue(getLLVMContext()), ForEHVar);
+
+ // Thread a jump through the finally cleanup.
+ EmitBranchThroughCleanup(RethrowDest);
+
+ Builder.restoreIP(SavedIP);
+
+ EHCatchScope *CatchScope = EHStack.pushCatch(1);
+ CatchScope->setCatchAllHandler(0, CatchAllBB);
+
+ return Info;
+}
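+
+// Schematically (block names as above, everything else illustrative), a
+// '@try { body } @finally { fin }' protected by this machinery behaves like:
+//
+//   body                   ; unwindable calls invoke to finally.catchall
+//   fin                    ; normal exit: ForEHVar is false
+//   ...
+// finally.catchall:
+//   BeginCatchFn(exn)      ; if a begin-catch function was provided
+//   ForEHVar = true
+//   fin                    ; the same cleanup, which then rethrows through
+//   RethrowFn(exn)         ; RethrowFn because ForEHVar is set
+//
+// An end-catch cleanup balances the begin-catch when one was provided, and
+// the begin/end-catch and rethrow entry points are whatever the caller
+// passes in, which is what keeps this helper mostly runtime-agnostic.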
+
+void CodeGenFunction::ExitFinallyBlock(FinallyInfo &Info) {
+ // Leave the finally catch-all.
+ EHCatchScope &Catch = cast<EHCatchScope>(*EHStack.begin());
+ llvm::BasicBlock *CatchAllBB = Catch.getHandler(0).Block;
+ EHStack.popCatch();
+
+ // And leave the normal cleanup.
+ PopCleanupBlock();
+
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+ EmitBlock(CatchAllBB, true);
+
+ Builder.restoreIP(SavedIP);
+}
+
+llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
+ if (TerminateLandingPad)
+ return TerminateLandingPad;
+
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ // This will get inserted at the end of the function.
+ TerminateLandingPad = createBasicBlock("terminate.lpad");
+ Builder.SetInsertPoint(TerminateLandingPad);
+
+ // Tell the backend that this is a landing pad.
+ llvm::CallInst *Exn =
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_exception), "exn");
+ Exn->setDoesNotThrow();
+
+ const EHPersonality &Personality = EHPersonality::get(CGM.getLangOptions());
+
+ // Tell the backend what the exception table should be:
+ // nothing but a catch-all.
+ llvm::Value *Args[3] = { Exn, getPersonalityFn(*this, Personality),
+ getCatchAllValue(*this) };
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_selector),
+ Args, Args+3, "eh.selector")
+ ->setDoesNotThrow();
+
+ llvm::CallInst *TerminateCall = Builder.CreateCall(getTerminateFn(*this));
+ TerminateCall->setDoesNotReturn();
+ TerminateCall->setDoesNotThrow();
+ Builder.CreateUnreachable();
+
+ // Restore the saved insertion state.
+ Builder.restoreIP(SavedIP);
+
+ return TerminateLandingPad;
+}
+
+llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
+ if (TerminateHandler)
+ return TerminateHandler;
+
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ // Set up the terminate handler. This block is inserted at the very
+ // end of the function by FinishFunction.
+ TerminateHandler = createBasicBlock("terminate.handler");
+ Builder.SetInsertPoint(TerminateHandler);
+ llvm::CallInst *TerminateCall = Builder.CreateCall(getTerminateFn(*this));
+ TerminateCall->setDoesNotReturn();
+ TerminateCall->setDoesNotThrow();
+ Builder.CreateUnreachable();
+
+ // Restore the saved insertion state.
+ Builder.restoreIP(SavedIP);
+
+ return TerminateHandler;
+}
+
+CodeGenFunction::UnwindDest CodeGenFunction::getRethrowDest() {
+ if (RethrowBlock.isValid()) return RethrowBlock;
+
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveIP();
+
+ // We emit a jump to a notional label at the outermost unwind state.
+ llvm::BasicBlock *Unwind = createBasicBlock("eh.resume");
+ Builder.SetInsertPoint(Unwind);
+
+ const EHPersonality &Personality = EHPersonality::get(CGM.getLangOptions());
+
+ // This can always be a call because we necessarily didn't find
+ // anything on the EH stack which needs our help.
+ llvm::Constant *RethrowFn;
+ if (const char *RethrowName = Personality.getCatchallRethrowFnName())
+ RethrowFn = getCatchallRethrowFn(*this, RethrowName);
+ else
+ RethrowFn = getUnwindResumeOrRethrowFn();
+
+ Builder.CreateCall(RethrowFn, Builder.CreateLoad(getExceptionSlot()))
+ ->setDoesNotReturn();
+ Builder.CreateUnreachable();
+
+ Builder.restoreIP(SavedIP);
+
+ RethrowBlock = UnwindDest(Unwind, EHStack.stable_end(), 0);
+ return RethrowBlock;
+}
+
+EHScopeStack::Cleanup::~Cleanup() {
+ llvm_unreachable("Cleanup is indestructable");
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGException.h b/contrib/llvm/tools/clang/lib/CodeGen/CGException.h
new file mode 100644
index 0000000..f129474
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGException.h
@@ -0,0 +1,593 @@
+//===-- CGException.h - Classes for exceptions IR generation ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes support the generation of LLVM IR for exceptions in
+// C++ and Objective C.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGEXCEPTION_H
+#define CLANG_CODEGEN_CGEXCEPTION_H
+
+/// EHScopeStack is defined in CodeGenFunction.h, but its
+/// implementation is in this file and in CGException.cpp.
+#include "CodeGenFunction.h"
+
+namespace llvm {
+ class Value;
+ class BasicBlock;
+}
+
+namespace clang {
+namespace CodeGen {
+
+/// The exceptions personality for a function.
+class EHPersonality {
+ const char *PersonalityFn;
+
+ // If this is non-null, this personality requires a non-standard
+ // function for rethrowing an exception after a catchall cleanup.
+ // This function must have prototype void(void*).
+ const char *CatchallRethrowFn;
+
+ EHPersonality(const char *PersonalityFn,
+ const char *CatchallRethrowFn = 0)
+ : PersonalityFn(PersonalityFn),
+ CatchallRethrowFn(CatchallRethrowFn) {}
+
+public:
+ static const EHPersonality &get(const LangOptions &Lang);
+ static const EHPersonality GNU_C;
+ static const EHPersonality GNU_ObjC;
+ static const EHPersonality NeXT_ObjC;
+ static const EHPersonality GNU_CPlusPlus;
+ static const EHPersonality GNU_CPlusPlus_SJLJ;
+
+ const char *getPersonalityFnName() const { return PersonalityFn; }
+ const char *getCatchallRethrowFnName() const { return CatchallRethrowFn; }
+};
+
+/// A protected scope for zero-cost EH handling.
+class EHScope {
+ llvm::BasicBlock *CachedLandingPad;
+
+ unsigned K : 2;
+
+protected:
+ enum { BitsRemaining = 30 };
+
+public:
+ enum Kind { Cleanup, Catch, Terminate, Filter };
+
+ EHScope(Kind K) : CachedLandingPad(0), K(K) {}
+
+ Kind getKind() const { return static_cast<Kind>(K); }
+
+ llvm::BasicBlock *getCachedLandingPad() const {
+ return CachedLandingPad;
+ }
+
+ void setCachedLandingPad(llvm::BasicBlock *Block) {
+ CachedLandingPad = Block;
+ }
+};
+
+/// A scope which attempts to handle some, possibly all, types of
+/// exceptions.
+///
+/// Objective C @finally blocks are represented using a cleanup scope
+/// after the catch scope.
+class EHCatchScope : public EHScope {
+ unsigned NumHandlers : BitsRemaining;
+
+ // In effect, we have a flexible array member
+ // Handler Handlers[0];
+ // But that's only standard in C99, not C++, so we have to do
+ // annoying pointer arithmetic instead.
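+ //
+ // Illustrative layout: a scope with N handlers is allocated as
+ //   [ EHCatchScope ][ Handler 0 ] ... [ Handler N-1 ]
+ // which is what getSizeForNumHandlers() and getHandlers() below assume.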
+
+public:
+ struct Handler {
+ /// A type info value, or null (C++ null, not an LLVM null pointer)
+ /// for a catch-all.
+ llvm::Value *Type;
+
+ /// The catch handler for this type.
+ llvm::BasicBlock *Block;
+
+ /// The unwind destination index for this handler.
+ unsigned Index;
+ };
+
+private:
+ friend class EHScopeStack;
+
+ Handler *getHandlers() {
+ return reinterpret_cast<Handler*>(this+1);
+ }
+
+ const Handler *getHandlers() const {
+ return reinterpret_cast<const Handler*>(this+1);
+ }
+
+public:
+ static size_t getSizeForNumHandlers(unsigned N) {
+ return sizeof(EHCatchScope) + N * sizeof(Handler);
+ }
+
+ EHCatchScope(unsigned NumHandlers)
+ : EHScope(Catch), NumHandlers(NumHandlers) {
+ }
+
+ unsigned getNumHandlers() const {
+ return NumHandlers;
+ }
+
+ void setCatchAllHandler(unsigned I, llvm::BasicBlock *Block) {
+ setHandler(I, /*catchall*/ 0, Block);
+ }
+
+ void setHandler(unsigned I, llvm::Value *Type, llvm::BasicBlock *Block) {
+ assert(I < getNumHandlers());
+ getHandlers()[I].Type = Type;
+ getHandlers()[I].Block = Block;
+ }
+
+ const Handler &getHandler(unsigned I) const {
+ assert(I < getNumHandlers());
+ return getHandlers()[I];
+ }
+
+ typedef const Handler *iterator;
+ iterator begin() const { return getHandlers(); }
+ iterator end() const { return getHandlers() + getNumHandlers(); }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Catch;
+ }
+};
+
+/// A cleanup scope which generates the cleanup blocks lazily.
+class EHCleanupScope : public EHScope {
+ /// Whether this cleanup needs to be run along normal edges.
+ bool IsNormalCleanup : 1;
+
+ /// Whether this cleanup needs to be run along exception edges.
+ bool IsEHCleanup : 1;
+
+ /// Whether this cleanup was activated before all normal uses.
+ bool ActivatedBeforeNormalUse : 1;
+
+ /// Whether this cleanup was activated before all EH uses.
+ bool ActivatedBeforeEHUse : 1;
+
+ /// The amount of extra storage needed by the Cleanup.
+ /// Always a multiple of the scope-stack alignment.
+ unsigned CleanupSize : 12;
+
+ /// The number of fixups required by enclosing scopes (not including
+ /// this one). If this is the top cleanup scope, all the fixups
+ /// from this index onwards belong to this scope.
+ unsigned FixupDepth : BitsRemaining - 16;
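+
+ // Note: the four flag bits above plus CleanupSize (12 bits) and FixupDepth
+ // (BitsRemaining - 16 = 14 bits) add up to exactly the 30-bit BitsRemaining
+ // budget defined in EHScope.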
+
+ /// The nearest normal cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingNormal;
+
+ /// The nearest EH cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingEH;
+
+ /// The dual entry/exit block along the normal edge. This is lazily
+ /// created if needed before the cleanup is popped.
+ llvm::BasicBlock *NormalBlock;
+
+ /// The dual entry/exit block along the EH edge. This is lazily
+ /// created if needed before the cleanup is popped.
+ llvm::BasicBlock *EHBlock;
+
+ /// An optional i1 variable indicating whether this cleanup has been
+ /// activated yet. This has one of three states:
+ /// - it is null if the cleanup is inactive
+ /// - it is activeSentinel() if the cleanup is active and was not
+ /// required before activation
+ /// - it points to a valid variable
+ llvm::AllocaInst *ActiveVar;
+
+ /// Extra information required for cleanups that have resolved
+ /// branches through them. This has to be allocated on the side
+ /// because everything on the cleanup stack has to be trivially
+ /// movable.
+ struct ExtInfo {
+ /// The destinations of normal branch-afters and branch-throughs.
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> Branches;
+
+ /// Normal branch-afters.
+ llvm::SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4>
+ BranchAfters;
+
+ /// The destinations of EH branch-afters and branch-throughs.
+ /// TODO: optimize for the extremely common case of a single
+ /// branch-through.
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> EHBranches;
+
+ /// EH branch-afters.
+ llvm::SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4>
+ EHBranchAfters;
+ };
+ mutable struct ExtInfo *ExtInfo;
+
+ struct ExtInfo &getExtInfo() {
+ if (!ExtInfo) ExtInfo = new struct ExtInfo();
+ return *ExtInfo;
+ }
+
+ const struct ExtInfo &getExtInfo() const {
+ if (!ExtInfo) ExtInfo = new struct ExtInfo();
+ return *ExtInfo;
+ }
+
+public:
+ /// Gets the size required for a lazy cleanup scope with the given
+ /// cleanup-data requirements.
+ static size_t getSizeForCleanupSize(size_t Size) {
+ return sizeof(EHCleanupScope) + Size;
+ }
+
+ size_t getAllocatedSize() const {
+ return sizeof(EHCleanupScope) + CleanupSize;
+ }
+
+ EHCleanupScope(bool IsNormal, bool IsEH, bool IsActive,
+ unsigned CleanupSize, unsigned FixupDepth,
+ EHScopeStack::stable_iterator EnclosingNormal,
+ EHScopeStack::stable_iterator EnclosingEH)
+ : EHScope(EHScope::Cleanup),
+ IsNormalCleanup(IsNormal), IsEHCleanup(IsEH),
+ ActivatedBeforeNormalUse(IsActive),
+ ActivatedBeforeEHUse(IsActive),
+ CleanupSize(CleanupSize), FixupDepth(FixupDepth),
+ EnclosingNormal(EnclosingNormal), EnclosingEH(EnclosingEH),
+ NormalBlock(0), EHBlock(0),
+ ActiveVar(IsActive ? activeSentinel() : 0),
+ ExtInfo(0)
+ {
+ assert(this->CleanupSize == CleanupSize && "cleanup size overflow");
+ }
+
+ ~EHCleanupScope() {
+ delete ExtInfo;
+ }
+
+ bool isNormalCleanup() const { return IsNormalCleanup; }
+ llvm::BasicBlock *getNormalBlock() const { return NormalBlock; }
+ void setNormalBlock(llvm::BasicBlock *BB) { NormalBlock = BB; }
+
+ bool isEHCleanup() const { return IsEHCleanup; }
+ llvm::BasicBlock *getEHBlock() const { return EHBlock; }
+ void setEHBlock(llvm::BasicBlock *BB) { EHBlock = BB; }
+
+ static llvm::AllocaInst *activeSentinel() {
+ return reinterpret_cast<llvm::AllocaInst*>(1);
+ }
+
+ bool isActive() const { return ActiveVar != 0; }
+ llvm::AllocaInst *getActiveVar() const { return ActiveVar; }
+ void setActiveVar(llvm::AllocaInst *Var) { ActiveVar = Var; }
+
+ bool wasActivatedBeforeNormalUse() const { return ActivatedBeforeNormalUse; }
+ void setActivatedBeforeNormalUse(bool B) { ActivatedBeforeNormalUse = B; }
+
+ bool wasActivatedBeforeEHUse() const { return ActivatedBeforeEHUse; }
+ void setActivatedBeforeEHUse(bool B) { ActivatedBeforeEHUse = B; }
+
+ unsigned getFixupDepth() const { return FixupDepth; }
+ EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
+ return EnclosingNormal;
+ }
+ EHScopeStack::stable_iterator getEnclosingEHCleanup() const {
+ return EnclosingEH;
+ }
+
+ size_t getCleanupSize() const { return CleanupSize; }
+ void *getCleanupBuffer() { return this + 1; }
+
+ EHScopeStack::Cleanup *getCleanup() {
+ return reinterpret_cast<EHScopeStack::Cleanup*>(getCleanupBuffer());
+ }
+
+ /// True if this cleanup scope has any branch-afters or branch-throughs.
+ bool hasBranches() const { return ExtInfo && !ExtInfo->Branches.empty(); }
+
+ /// Add a branch-after to this cleanup scope. A branch-after is a
+ /// branch from a point protected by this (normal) cleanup to a
+ /// point in the normal cleanup scope immediately containing it.
+ /// For example,
+ /// for (;;) { A a; break; }
+ /// contains a branch-after.
+ ///
+ /// Branch-afters each have their own destination out of the
+ /// cleanup, guaranteed distinct from anything else threaded through
+ /// it. Therefore branch-afters usually force a switch after the
+ /// cleanup.
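+ ///
+ /// Concretely, each branch-after records an index constant (the Index
+ /// parameter below); after the cleanup body runs, that index is used to
+ /// dispatch to the recorded destination block.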
+ void addBranchAfter(llvm::ConstantInt *Index,
+ llvm::BasicBlock *Block) {
+ struct ExtInfo &ExtInfo = getExtInfo();
+ if (ExtInfo.Branches.insert(Block))
+ ExtInfo.BranchAfters.push_back(std::make_pair(Block, Index));
+ }
+
+ /// Return the number of unique branch-afters on this scope.
+ unsigned getNumBranchAfters() const {
+ return ExtInfo ? ExtInfo->BranchAfters.size() : 0;
+ }
+
+ llvm::BasicBlock *getBranchAfterBlock(unsigned I) const {
+ assert(I < getNumBranchAfters());
+ return ExtInfo->BranchAfters[I].first;
+ }
+
+ llvm::ConstantInt *getBranchAfterIndex(unsigned I) const {
+ assert(I < getNumBranchAfters());
+ return ExtInfo->BranchAfters[I].second;
+ }
+
+ /// Add a branch-through to this cleanup scope. A branch-through is
+ /// a branch from a scope protected by this (normal) cleanup to an
+ /// enclosing scope other than the immediately-enclosing normal
+ /// cleanup scope.
+ ///
+ /// In the following example, the branch through B's scope is a
+ /// branch-through, while the branch through A's scope is a
+ /// branch-after:
+ /// for (;;) { A a; B b; break; }
+ ///
+ /// All branch-throughs have a common destination out of the
+ /// cleanup, one possibly shared with the fall-through. Therefore
+ /// branch-throughs usually don't force a switch after the cleanup.
+ ///
+ /// \return true if the branch-through was new to this scope
+ bool addBranchThrough(llvm::BasicBlock *Block) {
+ return getExtInfo().Branches.insert(Block);
+ }
+
+ /// Determines if this cleanup scope has any branch-throughs.
+ bool hasBranchThroughs() const {
+ if (!ExtInfo) return false;
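+ // Branches holds the destinations of both branch-afters and
+ // branch-throughs, so any entries beyond BranchAfters must have come
+ // from addBranchThrough().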
+ return (ExtInfo->BranchAfters.size() != ExtInfo->Branches.size());
+ }
+
+ // Same stuff, only for EH branches instead of normal branches.
+ // It's quite possible that we could find a better representation
+ // for this.
+
+ bool hasEHBranches() const { return ExtInfo && !ExtInfo->EHBranches.empty(); }
+ void addEHBranchAfter(llvm::ConstantInt *Index,
+ llvm::BasicBlock *Block) {
+ struct ExtInfo &ExtInfo = getExtInfo();
+ if (ExtInfo.EHBranches.insert(Block))
+ ExtInfo.EHBranchAfters.push_back(std::make_pair(Block, Index));
+ }
+
+ unsigned getNumEHBranchAfters() const {
+ return ExtInfo ? ExtInfo->EHBranchAfters.size() : 0;
+ }
+
+ llvm::BasicBlock *getEHBranchAfterBlock(unsigned I) const {
+ assert(I < getNumEHBranchAfters());
+ return ExtInfo->EHBranchAfters[I].first;
+ }
+
+ llvm::ConstantInt *getEHBranchAfterIndex(unsigned I) const {
+ assert(I < getNumEHBranchAfters());
+ return ExtInfo->EHBranchAfters[I].second;
+ }
+
+ bool addEHBranchThrough(llvm::BasicBlock *Block) {
+ return getExtInfo().EHBranches.insert(Block);
+ }
+
+ bool hasEHBranchThroughs() const {
+ if (!ExtInfo) return false;
+ return (ExtInfo->EHBranchAfters.size() != ExtInfo->EHBranches.size());
+ }
+
+ static bool classof(const EHScope *Scope) {
+ return (Scope->getKind() == Cleanup);
+ }
+};
+
+/// An exceptions scope which filters exceptions thrown through it.
+/// Only exceptions matching the filter types will be permitted to be
+/// thrown.
+///
+/// This is used to implement C++ exception specifications.
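+/// For example, a declaration like 'void f() throw(A, B);' is implemented by
+/// pushing an EHFilterScope whose filter values are the type infos for A and B.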
+class EHFilterScope : public EHScope {
+ unsigned NumFilters : BitsRemaining;
+
+ // Essentially ends in a flexible array member:
+ // llvm::Value *FilterTypes[0];
+
+ llvm::Value **getFilters() {
+ return reinterpret_cast<llvm::Value**>(this+1);
+ }
+
+ llvm::Value * const *getFilters() const {
+ return reinterpret_cast<llvm::Value* const *>(this+1);
+ }
+
+public:
+ EHFilterScope(unsigned NumFilters) :
+ EHScope(Filter), NumFilters(NumFilters) {}
+
+ static size_t getSizeForNumFilters(unsigned NumFilters) {
+ return sizeof(EHFilterScope) + NumFilters * sizeof(llvm::Value*);
+ }
+
+ unsigned getNumFilters() const { return NumFilters; }
+
+ void setFilter(unsigned I, llvm::Value *FilterValue) {
+ assert(I < getNumFilters());
+ getFilters()[I] = FilterValue;
+ }
+
+ llvm::Value *getFilter(unsigned I) const {
+ assert(I < getNumFilters());
+ return getFilters()[I];
+ }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Filter;
+ }
+};
+
+/// An exceptions scope which calls std::terminate if any exception
+/// reaches it.
+class EHTerminateScope : public EHScope {
+ unsigned DestIndex : BitsRemaining;
+public:
+ EHTerminateScope(unsigned Index) : EHScope(Terminate), DestIndex(Index) {}
+ static size_t getSize() { return sizeof(EHTerminateScope); }
+
+ unsigned getDestIndex() const { return DestIndex; }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Terminate;
+ }
+};
+
+/// A non-stable pointer into the scope stack.
+class EHScopeStack::iterator {
+ char *Ptr;
+
+ friend class EHScopeStack;
+ explicit iterator(char *Ptr) : Ptr(Ptr) {}
+
+public:
+ iterator() : Ptr(0) {}
+
+ EHScope *get() const {
+ return reinterpret_cast<EHScope*>(Ptr);
+ }
+
+ EHScope *operator->() const { return get(); }
+ EHScope &operator*() const { return *get(); }
+
+ iterator &operator++() {
+ switch (get()->getKind()) {
+ case EHScope::Catch:
+ Ptr += EHCatchScope::getSizeForNumHandlers(
+ static_cast<const EHCatchScope*>(get())->getNumHandlers());
+ break;
+
+ case EHScope::Filter:
+ Ptr += EHFilterScope::getSizeForNumFilters(
+ static_cast<const EHFilterScope*>(get())->getNumFilters());
+ break;
+
+ case EHScope::Cleanup:
+ Ptr += static_cast<const EHCleanupScope*>(get())
+ ->getAllocatedSize();
+ break;
+
+ case EHScope::Terminate:
+ Ptr += EHTerminateScope::getSize();
+ break;
+ }
+
+ return *this;
+ }
+
+ iterator next() {
+ iterator copy = *this;
+ ++copy;
+ return copy;
+ }
+
+ iterator operator++(int) {
+ iterator copy = *this;
+ operator++();
+ return copy;
+ }
+
+ bool encloses(iterator other) const { return Ptr >= other.Ptr; }
+ bool strictlyEncloses(iterator other) const { return Ptr > other.Ptr; }
+
+ bool operator==(iterator other) const { return Ptr == other.Ptr; }
+ bool operator!=(iterator other) const { return Ptr != other.Ptr; }
+};
+
+inline EHScopeStack::iterator EHScopeStack::begin() const {
+ return iterator(StartOfData);
+}
+
+inline EHScopeStack::iterator EHScopeStack::end() const {
+ return iterator(EndOfBuffer);
+}
+
+inline void EHScopeStack::popCatch() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ assert(isa<EHCatchScope>(*begin()));
+ StartOfData += EHCatchScope::getSizeForNumHandlers(
+ cast<EHCatchScope>(*begin()).getNumHandlers());
+
+ if (empty()) NextEHDestIndex = FirstEHDestIndex;
+
+ assert(CatchDepth > 0 && "mismatched catch/terminate push/pop");
+ CatchDepth--;
+}
+
+inline void EHScopeStack::popTerminate() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ assert(isa<EHTerminateScope>(*begin()));
+ StartOfData += EHTerminateScope::getSize();
+
+ if (empty()) NextEHDestIndex = FirstEHDestIndex;
+
+ assert(CatchDepth > 0 && "mismatched catch/terminate push/pop");
+ CatchDepth--;
+}
+
+inline EHScopeStack::iterator EHScopeStack::find(stable_iterator sp) const {
+ assert(sp.isValid() && "finding invalid savepoint");
+ assert(sp.Size <= stable_begin().Size && "finding savepoint after pop");
+ return iterator(EndOfBuffer - sp.Size);
+}
+
+inline EHScopeStack::stable_iterator
+EHScopeStack::stabilize(iterator ir) const {
+ assert(StartOfData <= ir.Ptr && ir.Ptr <= EndOfBuffer);
+ return stable_iterator(EndOfBuffer - ir.Ptr);
+}
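+
+// Note that a stable_iterator is stored as an offset from the end of the
+// buffer rather than as a raw pointer, which is why find() and stabilize()
+// above convert with 'EndOfBuffer - ...'; the offset stays valid even if the
+// stack's buffer is reallocated as scopes are pushed.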
+
+inline EHScopeStack::stable_iterator
+EHScopeStack::getInnermostActiveNormalCleanup() const {
+ for (EHScopeStack::stable_iterator
+ I = getInnermostNormalCleanup(), E = stable_end(); I != E; ) {
+ EHCleanupScope &S = cast<EHCleanupScope>(*find(I));
+ if (S.isActive()) return I;
+ I = S.getEnclosingNormalCleanup();
+ }
+ return stable_end();
+}
+
+inline EHScopeStack::stable_iterator
+EHScopeStack::getInnermostActiveEHCleanup() const {
+ for (EHScopeStack::stable_iterator
+ I = getInnermostEHCleanup(), E = stable_end(); I != E; ) {
+ EHCleanupScope &S = cast<EHCleanupScope>(*find(I));
+ if (S.isActive()) return I;
+ I = S.getEnclosingEHCleanup();
+ }
+ return stable_end();
+}
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
new file mode 100644
index 0000000..3750ab8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
@@ -0,0 +1,2108 @@
+//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGCall.h"
+#include "CGCXXABI.h"
+#include "CGRecordLayout.h"
+#include "CGObjCRuntime.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "llvm/Intrinsics.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===--------------------------------------------------------------------===//
+// Miscellaneous Helper Methods
+//===--------------------------------------------------------------------===//
+
+/// CreateTempAlloca - This creates an alloca and inserts it into the entry
+/// block.
+llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
+ const llvm::Twine &Name) {
+ if (!Builder.isNamePreserving())
+ return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
+ return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
+}
+
+void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
+ llvm::Value *Init) {
+ llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
+ llvm::BasicBlock *Block = AllocaInsertPt->getParent();
+ Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
+}
+
+llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
+ const llvm::Twine &Name) {
+ llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
+ // FIXME: Should we prefer the preferred type alignment here?
+ CharUnits Align = getContext().getTypeAlignInChars(Ty);
+ Alloc->setAlignment(Align.getQuantity());
+ return Alloc;
+}
+
+llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
+ const llvm::Twine &Name) {
+ llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
+ // FIXME: Should we prefer the preferred type alignment here?
+ CharUnits Align = getContext().getTypeAlignInChars(Ty);
+ Alloc->setAlignment(Align.getQuantity());
+ return Alloc;
+}
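+
+// Note: CreateIRTemp uses the type's register representation (ConvertType),
+// while CreateMemTemp uses its in-memory representation (ConvertTypeForMem);
+// the two differ for types such as bool, which is i1 in registers but i8 in
+// memory.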
+
+/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
+/// expression and compare the result against zero, returning an Int1Ty value.
+llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
+ if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
+ llvm::Value *MemPtr = EmitScalarExpr(E);
+ return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
+ }
+
+ QualType BoolTy = getContext().BoolTy;
+ if (!E->getType()->isAnyComplexType())
+ return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);
+
+ return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
+}
+
+/// EmitAnyExpr - Emit code to compute the specified expression which can have
+/// any type. The result is returned as an RValue struct. If this is an
+/// aggregate expression, the aggloc/agglocvolatile arguments indicate where the
+/// result should be returned.
+RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc,
+ bool IsAggLocVolatile, bool IgnoreResult,
+ bool IsInitializer) {
+ if (!hasAggregateLLVMType(E->getType()))
+ return RValue::get(EmitScalarExpr(E, IgnoreResult));
+ else if (E->getType()->isAnyComplexType())
+ return RValue::getComplex(EmitComplexExpr(E, false, false,
+ IgnoreResult, IgnoreResult));
+
+ EmitAggExpr(E, AggLoc, IsAggLocVolatile, IgnoreResult, IsInitializer);
+ return RValue::getAggregate(AggLoc, IsAggLocVolatile);
+}
+
+/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), but the result will
+/// always be accessible even if no aggregate location is provided.
+RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E,
+ bool IsAggLocVolatile,
+ bool IsInitializer) {
+ llvm::Value *AggLoc = 0;
+
+ if (hasAggregateLLVMType(E->getType()) &&
+ !E->getType()->isAnyComplexType())
+ AggLoc = CreateMemTemp(E->getType(), "agg.tmp");
+ return EmitAnyExpr(E, AggLoc, IsAggLocVolatile, /*IgnoreResult=*/false,
+ IsInitializer);
+}
+
+/// EmitAnyExprToMem - Evaluate an expression into a given memory
+/// location.
+void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
+ llvm::Value *Location,
+ bool IsLocationVolatile,
+ bool IsInit) {
+ if (E->getType()->isComplexType())
+ EmitComplexExprIntoAddr(E, Location, IsLocationVolatile);
+ else if (hasAggregateLLVMType(E->getType()))
+ EmitAggExpr(E, Location, IsLocationVolatile, /*Ignore*/ false, IsInit);
+ else {
+ RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
+ LValue LV = MakeAddrLValue(Location, E->getType());
+ EmitStoreThroughLValue(RV, LV, E->getType());
+ }
+}
+
+/// \brief An adjustment to be made to the temporary created when emitting a
+/// reference binding, which accesses a particular subobject of that temporary.
+struct SubobjectAdjustment {
+ enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;
+
+ union {
+ struct {
+ const CastExpr *BasePath;
+ const CXXRecordDecl *DerivedClass;
+ } DerivedToBase;
+
+ FieldDecl *Field;
+ };
+
+ SubobjectAdjustment(const CastExpr *BasePath,
+ const CXXRecordDecl *DerivedClass)
+ : Kind(DerivedToBaseAdjustment)
+ {
+ DerivedToBase.BasePath = BasePath;
+ DerivedToBase.DerivedClass = DerivedClass;
+ }
+
+ SubobjectAdjustment(FieldDecl *Field)
+ : Kind(FieldAdjustment)
+ {
+ this->Field = Field;
+ }
+};
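+
+// Illustrative examples (hypothetical types): given 'struct S { int x; };',
+// binding 'const int &r = S().x;' records a FieldAdjustment for 'x', while
+// with 'struct D : B {};' binding 'const B &r = D();' records a
+// DerivedToBaseAdjustment from the D temporary to its B subobject.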
+
+static llvm::Value *
+CreateReferenceTemporary(CodeGenFunction& CGF, QualType Type,
+ const NamedDecl *InitializedDecl) {
+ if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
+ if (VD->hasGlobalStorage()) {
+ llvm::SmallString<256> Name;
+ CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Name);
+
+ const llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);
+
+ // Create the reference temporary.
+ llvm::GlobalValue *RefTemp =
+ new llvm::GlobalVariable(CGF.CGM.getModule(),
+ RefTempTy, /*isConstant=*/false,
+ llvm::GlobalValue::InternalLinkage,
+ llvm::Constant::getNullValue(RefTempTy),
+ Name.str());
+ return RefTemp;
+ }
+ }
+
+ return CGF.CreateMemTemp(Type, "ref.tmp");
+}
+
+static llvm::Value *
+EmitExprForReferenceBinding(CodeGenFunction& CGF, const Expr* E,
+ llvm::Value *&ReferenceTemporary,
+ const CXXDestructorDecl *&ReferenceTemporaryDtor,
+ const NamedDecl *InitializedDecl) {
+ if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
+ E = DAE->getExpr();
+
+ if (const CXXExprWithTemporaries *TE = dyn_cast<CXXExprWithTemporaries>(E)) {
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+
+ return EmitExprForReferenceBinding(CGF, TE->getSubExpr(),
+ ReferenceTemporary,
+ ReferenceTemporaryDtor,
+ InitializedDecl);
+ }
+
+ RValue RV;
+ if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid) {
+ // Emit the expression as an lvalue.
+ LValue LV = CGF.EmitLValue(E);
+
+ if (LV.isSimple())
+ return LV.getAddress();
+
+ // We have to load the lvalue.
+ RV = CGF.EmitLoadOfLValue(LV, E->getType());
+ } else {
+ QualType ResultTy = E->getType();
+
+ llvm::SmallVector<SubobjectAdjustment, 2> Adjustments;
+ while (true) {
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
+ E = PE->getSubExpr();
+ continue;
+ }
+
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if ((CE->getCastKind() == CK_DerivedToBase ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase) &&
+ E->getType()->isRecordType()) {
+ E = CE->getSubExpr();
+ CXXRecordDecl *Derived
+ = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
+ Adjustments.push_back(SubobjectAdjustment(CE, Derived));
+ continue;
+ }
+
+ if (CE->getCastKind() == CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ if (ME->getBase()->isLvalue(CGF.getContext()) != Expr::LV_Valid &&
+ ME->getBase()->getType()->isRecordType()) {
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ E = ME->getBase();
+ Adjustments.push_back(SubobjectAdjustment(Field));
+ continue;
+ }
+ }
+ }
+
+ // Nothing changed.
+ break;
+ }
+
+ // Create a reference temporary if necessary.
+ if (CGF.hasAggregateLLVMType(E->getType()) &&
+ !E->getType()->isAnyComplexType())
+ ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
+ InitializedDecl);
+
+ RV = CGF.EmitAnyExpr(E, ReferenceTemporary, /*IsAggLocVolatile=*/false,
+ /*IgnoreResult=*/false, InitializedDecl);
+
+ if (InitializedDecl) {
+ // Get the destructor for the reference temporary.
+ if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (!ClassDecl->hasTrivialDestructor())
+ ReferenceTemporaryDtor = ClassDecl->getDestructor();
+ }
+ }
+
+ // Check if we need to perform derived-to-base casts and/or field accesses, to
+ // get from the temporary object we created (and, potentially, for which we
+ // extended the lifetime) to the subobject we're binding the reference to.
+ if (!Adjustments.empty()) {
+ llvm::Value *Object = RV.getAggregateAddr();
+ for (unsigned I = Adjustments.size(); I != 0; --I) {
+ SubobjectAdjustment &Adjustment = Adjustments[I-1];
+ switch (Adjustment.Kind) {
+ case SubobjectAdjustment::DerivedToBaseAdjustment:
+ Object =
+ CGF.GetAddressOfBaseClass(Object,
+ Adjustment.DerivedToBase.DerivedClass,
+ Adjustment.DerivedToBase.BasePath->path_begin(),
+ Adjustment.DerivedToBase.BasePath->path_end(),
+ /*NullCheckValue=*/false);
+ break;
+
+ case SubobjectAdjustment::FieldAdjustment: {
+ LValue LV =
+ CGF.EmitLValueForField(Object, Adjustment.Field, 0);
+ if (LV.isSimple()) {
+ Object = LV.getAddress();
+ break;
+ }
+
+ // For non-simple lvalues, we actually have to create a copy of
+ // the object we're binding to.
+ QualType T = Adjustment.Field->getType().getNonReferenceType()
+ .getUnqualifiedType();
+ Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
+ LValue TempLV = CGF.MakeAddrLValue(Object,
+ Adjustment.Field->getType());
+ CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV, T), TempLV, T);
+ break;
+ }
+
+ }
+ }
+
+ const llvm::Type *ResultPtrTy = CGF.ConvertType(ResultTy)->getPointerTo();
+ return CGF.Builder.CreateBitCast(Object, ResultPtrTy, "temp");
+ }
+ }
+
+ if (RV.isAggregate())
+ return RV.getAggregateAddr();
+
+ // Create a temporary variable that we can bind the reference to.
+ ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
+ InitializedDecl);
+
+
+ unsigned Alignment =
+ CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
+ if (RV.isScalar())
+ CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
+ /*Volatile=*/false, Alignment, E->getType());
+ else
+ CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
+ /*Volatile=*/false);
+ return ReferenceTemporary;
+}
+
+RValue
+CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
+ const NamedDecl *InitializedDecl) {
+ llvm::Value *ReferenceTemporary = 0;
+ const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
+ llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
+ ReferenceTemporaryDtor,
+ InitializedDecl);
+ if (!ReferenceTemporaryDtor)
+ return RValue::get(Value);
+
+ // Make sure to call the destructor for the reference temporary.
+ if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
+ if (VD->hasGlobalStorage()) {
+ llvm::Constant *DtorFn =
+ CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
+ EmitCXXGlobalDtorRegistration(DtorFn,
+ cast<llvm::Constant>(ReferenceTemporary));
+
+ return RValue::get(Value);
+ }
+ }
+
+ PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
+
+ return RValue::get(Value);
+}
+
+
+/// getAccessedFieldNo - Given an encoded value and a result number, return the
+/// input field number being accessed.
+unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
+ const llvm::Constant *Elts) {
+ if (isa<llvm::ConstantAggregateZero>(Elts))
+ return 0;
+
+ return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
+}
+
+void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
+ if (!CatchUndefined)
+ return;
+
+ Address = Builder.CreateBitCast(Address, PtrToInt8Ty);
+
+ const llvm::Type *IntPtrT = IntPtrTy;
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, &IntPtrT, 1);
+ const llvm::IntegerType *Int1Ty = llvm::Type::getInt1Ty(VMContext);
+
+ // In time, people may want to control this and use a 1 here.
+ llvm::Value *Arg = llvm::ConstantInt::get(Int1Ty, 0);
+ llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
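+ // llvm.objectsize returns -1 when the object size is unknown, so in that
+ // case we branch straight to Cont and skip the bounds check rather than
+ // trapping spuriously.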
+ llvm::BasicBlock *Cont = createBasicBlock();
+ llvm::BasicBlock *Check = createBasicBlock();
+ llvm::Value *NegativeOne = llvm::ConstantInt::get(IntPtrTy, -1ULL);
+ Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);
+
+ EmitBlock(Check);
+ Builder.CreateCondBr(Builder.CreateICmpUGE(C,
+ llvm::ConstantInt::get(IntPtrTy, Size)),
+ Cont, getTrapBB());
+ EmitBlock(Cont);
+}
+
+
+CodeGenFunction::ComplexPairTy CodeGenFunction::
+EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+ ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
+ LV.isVolatileQualified());
+
+ llvm::Value *NextVal;
+ if (isa<llvm::IntegerType>(InVal.first->getType())) {
+ uint64_t AmountVal = isInc ? 1 : -1;
+ NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
+
+ // Add the inc/dec to the real part.
+ NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
+ } else {
+ QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
+ llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
+ if (!isInc)
+ FVal.changeSign();
+ NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
+
+ // Add the inc/dec to the real part.
+ NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
+ }
+
+ ComplexPairTy IncVal(NextVal, InVal.second);
+
+ // Store the updated result through the lvalue.
+ StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());
+
+ // If this is a postinc, return the value read from memory, otherwise use the
+ // updated value.
+ return isPre ? IncVal : InVal;
+}
+
+
+//===----------------------------------------------------------------------===//
+// LValue Expression Emission
+//===----------------------------------------------------------------------===//
+
+RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
+ if (Ty->isVoidType())
+ return RValue::get(0);
+
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
+ const llvm::Type *EltTy = ConvertType(CTy->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return RValue::getComplex(std::make_pair(U, U));
+ }
+
+ // If this is a use of an undefined aggregate type, the aggregate must have an
+ // identifiable address. Just because the contents of the value are undefined
+ // doesn't mean that the address can't be taken and compared.
+ if (hasAggregateLLVMType(Ty)) {
+ llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
+ return RValue::getAggregate(DestPtr);
+ }
+
+ return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
+}
+
+RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
+ const char *Name) {
+ ErrorUnsupported(E, Name);
+ return GetUndefRValue(E->getType());
+}
+
+LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
+ const char *Name) {
+ ErrorUnsupported(E, Name);
+ llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
+ return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
+}
+
+LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
+ LValue LV = EmitLValue(E);
+ if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
+ EmitCheck(LV.getAddress(), getContext().getTypeSize(E->getType()) / 8);
+ return LV;
+}
+
+/// EmitLValue - Emit code to compute a designator that specifies the location
+/// of the expression.
+///
+/// This can return one of two things: a simple address or a bitfield reference.
+/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
+/// an LLVM pointer type.
+///
+/// If this returns a bitfield reference, nothing about the pointee type of the
+/// LLVM value is known: For example, it may not be a pointer to an integer.
+///
+/// If this returns a normal address, and if the lvalue's C type is fixed size,
+/// this method guarantees that the returned pointer type will point to an LLVM
+/// type of the same size as the lvalue's type. If the lvalue has a variable
+/// length type, this is not possible.
+///
+LValue CodeGenFunction::EmitLValue(const Expr *E) {
+ switch (E->getStmtClass()) {
+ default: return EmitUnsupportedLValue(E, "l-value expression");
+
+ case Expr::ObjCSelectorExprClass:
+ return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
+ case Expr::ObjCIsaExprClass:
+ return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
+ case Expr::BinaryOperatorClass:
+ return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
+ case Expr::CompoundAssignOperatorClass:
+ return EmitCompoundAssignOperatorLValue(cast<CompoundAssignOperator>(E));
+ case Expr::CallExprClass:
+ case Expr::CXXMemberCallExprClass:
+ case Expr::CXXOperatorCallExprClass:
+ return EmitCallExprLValue(cast<CallExpr>(E));
+ case Expr::VAArgExprClass:
+ return EmitVAArgExprLValue(cast<VAArgExpr>(E));
+ case Expr::DeclRefExprClass:
+ return EmitDeclRefLValue(cast<DeclRefExpr>(E));
+ case Expr::ParenExprClass:return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
+ case Expr::PredefinedExprClass:
+ return EmitPredefinedLValue(cast<PredefinedExpr>(E));
+ case Expr::StringLiteralClass:
+ return EmitStringLiteralLValue(cast<StringLiteral>(E));
+ case Expr::ObjCEncodeExprClass:
+ return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
+
+ case Expr::BlockDeclRefExprClass:
+ return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));
+
+ case Expr::CXXTemporaryObjectExprClass:
+ case Expr::CXXConstructExprClass:
+ return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
+ case Expr::CXXBindTemporaryExprClass:
+ return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
+ case Expr::CXXExprWithTemporariesClass:
+ return EmitCXXExprWithTemporariesLValue(cast<CXXExprWithTemporaries>(E));
+ case Expr::CXXScalarValueInitExprClass:
+ return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
+ case Expr::CXXDefaultArgExprClass:
+ return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
+ case Expr::CXXTypeidExprClass:
+ return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
+
+ case Expr::ObjCMessageExprClass:
+ return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
+ case Expr::ObjCIvarRefExprClass:
+ return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
+ case Expr::ObjCPropertyRefExprClass:
+ return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
+ case Expr::ObjCImplicitSetterGetterRefExprClass:
+ return EmitObjCKVCRefLValue(cast<ObjCImplicitSetterGetterRefExpr>(E));
+ case Expr::ObjCSuperExprClass:
+ return EmitObjCSuperExprLValue(cast<ObjCSuperExpr>(E));
+
+ case Expr::StmtExprClass:
+ return EmitStmtExprLValue(cast<StmtExpr>(E));
+ case Expr::UnaryOperatorClass:
+ return EmitUnaryOpLValue(cast<UnaryOperator>(E));
+ case Expr::ArraySubscriptExprClass:
+ return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
+ case Expr::ExtVectorElementExprClass:
+ return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
+ case Expr::MemberExprClass:
+ return EmitMemberExpr(cast<MemberExpr>(E));
+ case Expr::CompoundLiteralExprClass:
+ return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
+ case Expr::ConditionalOperatorClass:
+ return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
+ case Expr::ChooseExprClass:
+ return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
+ case Expr::ImplicitCastExprClass:
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXFunctionalCastExprClass:
+ case Expr::CXXStaticCastExprClass:
+ case Expr::CXXDynamicCastExprClass:
+ case Expr::CXXReinterpretCastExprClass:
+ case Expr::CXXConstCastExprClass:
+ return EmitCastLValue(cast<CastExpr>(E));
+ }
+}
+
+llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
+ unsigned Alignment, QualType Ty) {
+ llvm::LoadInst *Load = Builder.CreateLoad(Addr, "tmp");
+ if (Volatile)
+ Load->setVolatile(true);
+ if (Alignment)
+ Load->setAlignment(Alignment);
+
+ // Bool can have different representation in memory than in registers.
+ llvm::Value *V = Load;
+ if (Ty->isBooleanType())
+ if (V->getType() != llvm::Type::getInt1Ty(VMContext))
+ V = Builder.CreateTrunc(V, llvm::Type::getInt1Ty(VMContext), "tobool");
+
+ return V;
+}
+
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
+ bool Volatile, unsigned Alignment,
+ QualType Ty) {
+
+ if (Ty->isBooleanType()) {
+ // Bool can have different representation in memory than in registers.
+ const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
+ Value = Builder.CreateIntCast(Value, DstPtr->getElementType(), false);
+ }
+
+ llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
+ if (Alignment)
+ Store->setAlignment(Alignment);
+}
+
+/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
+/// method emits the address of the lvalue, then loads the result as an rvalue,
+/// returning the rvalue.
+RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
+ if (LV.isObjCWeak()) {
+ // load of a __weak object.
+ llvm::Value *AddrWeakObj = LV.getAddress();
+ return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
+ AddrWeakObj));
+ }
+
+ if (LV.isSimple()) {
+ llvm::Value *Ptr = LV.getAddress();
+
+ // Functions are l-values that don't require loading.
+ if (ExprType->isFunctionType())
+ return RValue::get(Ptr);
+
+ // Everything needs a load.
+ return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
+ LV.getAlignment(), ExprType));
+
+ }
+
+ if (LV.isVectorElt()) {
+ llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
+ LV.isVolatileQualified(), "tmp");
+ return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
+ "vecext"));
+ }
+
+ // If this is a reference to a subset of the elements of a vector, either
+ // shuffle the input or extract/insert them as appropriate.
+ if (LV.isExtVectorElt())
+ return EmitLoadOfExtVectorElementLValue(LV, ExprType);
+
+ if (LV.isBitField())
+ return EmitLoadOfBitfieldLValue(LV, ExprType);
+
+ if (LV.isPropertyRef())
+ return EmitLoadOfPropertyRefLValue(LV, ExprType);
+
+ assert(LV.isKVCRef() && "Unknown LValue type!");
+ return EmitLoadOfKVCRefLValue(LV, ExprType);
+}
+
+RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
+ QualType ExprType) {
+ const CGBitFieldInfo &Info = LV.getBitFieldInfo();
+
+ // Get the output type.
+ const llvm::Type *ResLTy = ConvertType(ExprType);
+ unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
+
+ // Compute the result as an OR of all of the individual component accesses.
+ llvm::Value *Res = 0;
+ for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
+ const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
+
+ // Get the field pointer.
+ llvm::Value *Ptr = LV.getBitFieldBaseAddr();
+
+ // Only offset by the field index if used, so that incoming values are not
+ // required to be structures.
+ if (AI.FieldIndex)
+ Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
+
+ // Offset by the byte offset, if used.
+ if (AI.FieldByteOffset) {
+ const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
+ Ptr = Builder.CreateBitCast(Ptr, i8PTy);
+ Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset,"bf.field.offs");
+ }
+
+ // Cast to the access type.
+ const llvm::Type *PTy = llvm::Type::getIntNPtrTy(VMContext, AI.AccessWidth,
+ ExprType.getAddressSpace());
+ Ptr = Builder.CreateBitCast(Ptr, PTy);
+
+ // Perform the load.
+ llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
+ if (AI.AccessAlignment)
+ Load->setAlignment(AI.AccessAlignment);
+
+ // Shift out unused low bits and mask out unused high bits.
+ llvm::Value *Val = Load;
+ if (AI.FieldBitStart)
+ Val = Builder.CreateLShr(Load, AI.FieldBitStart);
+ Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
+ AI.TargetBitWidth),
+ "bf.clear");
+
+ // Extend or truncate to the target size.
+ if (AI.AccessWidth < ResSizeInBits)
+ Val = Builder.CreateZExt(Val, ResLTy);
+ else if (AI.AccessWidth > ResSizeInBits)
+ Val = Builder.CreateTrunc(Val, ResLTy);
+
+ // Shift into place, and OR into the result.
+ if (AI.TargetBitOffset)
+ Val = Builder.CreateShl(Val, AI.TargetBitOffset);
+ Res = Res ? Builder.CreateOr(Res, Val) : Val;
+ }
+
+ // If the bit-field is signed, perform the sign-extension.
+ //
+ // FIXME: This can easily be folded into the load of the high bits, which
+ // could also eliminate the mask of high bits in some situations.
+ if (Info.isSigned()) {
+ unsigned ExtraBits = ResSizeInBits - Info.getSize();
+ if (ExtraBits)
+ Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
+ ExtraBits, "bf.val.sext");
+ }
+
+ return RValue::get(Res);
+}
+
+RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
+ QualType ExprType) {
+ return EmitObjCPropertyGet(LV.getPropertyRefExpr());
+}
+
+RValue CodeGenFunction::EmitLoadOfKVCRefLValue(LValue LV,
+ QualType ExprType) {
+ return EmitObjCPropertyGet(LV.getKVCRefExpr());
+}
+
+// If this is a reference to a subset of the elements of a vector, create an
+// appropriate shufflevector.
+RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
+ QualType ExprType) {
+ llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
+ LV.isVolatileQualified(), "tmp");
+
+ const llvm::Constant *Elts = LV.getExtVectorElts();
+
+ // If the result of the expression is a non-vector type, we must be extracting
+ // a single element. Just codegen as an extractelement.
+ const VectorType *ExprVT = ExprType->getAs<VectorType>();
+ if (!ExprVT) {
+ unsigned InIdx = getAccessedFieldNo(0, Elts);
+ llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
+ return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp"));
+ }
+
+ // Always use shuffle vector to try to retain the original program structure
+ unsigned NumResultElts = ExprVT->getNumElements();
+
+ llvm::SmallVector<llvm::Constant*, 4> Mask;
+ for (unsigned i = 0; i != NumResultElts; ++i) {
+ unsigned InIdx = getAccessedFieldNo(i, Elts);
+ Mask.push_back(llvm::ConstantInt::get(Int32Ty, InIdx));
+ }
+
+ llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
+ Vec = Builder.CreateShuffleVector(Vec,
+ llvm::UndefValue::get(Vec->getType()),
+ MaskV, "tmp");
+ return RValue::get(Vec);
+}
+
+
+
+/// EmitStoreThroughLValue - Store the specified rvalue into the specified
+/// lvalue, where both are guaranteed to have the same type, and that type
+/// is 'Ty'.
+void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
+ QualType Ty) {
+ if (!Dst.isSimple()) {
+ if (Dst.isVectorElt()) {
+ // Read/modify/write the vector, inserting the new element.
+ llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
+ Dst.isVolatileQualified(), "tmp");
+ Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
+ Dst.getVectorIdx(), "vecins");
+ Builder.CreateStore(Vec, Dst.getVectorAddr(),Dst.isVolatileQualified());
+ return;
+ }
+
+ // If this is an update of extended vector elements, insert them as
+ // appropriate.
+ if (Dst.isExtVectorElt())
+ return EmitStoreThroughExtVectorComponentLValue(Src, Dst, Ty);
+
+ if (Dst.isBitField())
+ return EmitStoreThroughBitfieldLValue(Src, Dst, Ty);
+
+ if (Dst.isPropertyRef())
+ return EmitStoreThroughPropertyRefLValue(Src, Dst, Ty);
+
+ assert(Dst.isKVCRef() && "Unknown LValue type");
+ return EmitStoreThroughKVCRefLValue(Src, Dst, Ty);
+ }
+
+ if (Dst.isObjCWeak() && !Dst.isNonGC()) {
+ // load of a __weak object.
+ llvm::Value *LvalueDst = Dst.getAddress();
+ llvm::Value *src = Src.getScalarVal();
+ CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
+ return;
+ }
+
+ if (Dst.isObjCStrong() && !Dst.isNonGC()) {
+ // load of a __strong object.
+ llvm::Value *LvalueDst = Dst.getAddress();
+ llvm::Value *src = Src.getScalarVal();
+ if (Dst.isObjCIvar()) {
+ assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
+ const llvm::Type *ResultType = ConvertType(getContext().LongTy);
+ llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
+ llvm::Value *dst = RHS;
+ RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
+ llvm::Value *LHS =
+ Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
+ llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
+ CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
+ BytesBetween);
+ } else if (Dst.isGlobalObjCRef()) {
+ CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
+ Dst.isThreadLocalRef());
+ }
+ else
+ CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
+ return;
+ }
+
+ assert(Src.isScalar() && "Can't emit an agg store with this method");
+ EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(),
+ Dst.isVolatileQualified(), Dst.getAlignment(), Ty);
+}
+
+void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
+ QualType Ty,
+ llvm::Value **Result) {
+ const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
+
+ // Get the output type.
+ const llvm::Type *ResLTy = ConvertTypeForMem(Ty);
+ unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
+
+ // Get the source value, truncated to the width of the bit-field.
+ llvm::Value *SrcVal = Src.getScalarVal();
+
+ if (Ty->isBooleanType())
+ SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);
+
+ SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
+ Info.getSize()),
+ "bf.value");
+
+ // Return the new value of the bit-field, if requested.
+ if (Result) {
+ // Cast back to the proper type for result.
+ const llvm::Type *SrcTy = Src.getScalarVal()->getType();
+ llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
+ "bf.reload.val");
+
+ // Sign extend if necessary.
+ if (Info.isSigned()) {
+ unsigned ExtraBits = ResSizeInBits - Info.getSize();
+ if (ExtraBits)
+ ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
+ ExtraBits, "bf.reload.sext");
+ }
+
+ *Result = ReloadVal;
+ }
+
+ // Iterate over the components, writing each piece to memory.
+ for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
+ const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
+
+ // Get the field pointer.
+ llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
+
+ // Only offset by the field index if used, so that incoming values are not
+ // required to be structures.
+ if (AI.FieldIndex)
+ Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
+
+ // Offset by the byte offset, if used.
+ if (AI.FieldByteOffset) {
+ const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
+ Ptr = Builder.CreateBitCast(Ptr, i8PTy);
+ Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset,"bf.field.offs");
+ }
+
+ // Cast to the access type.
+ const llvm::Type *PTy = llvm::Type::getIntNPtrTy(VMContext, AI.AccessWidth,
+ Ty.getAddressSpace());
+ Ptr = Builder.CreateBitCast(Ptr, PTy);
+
+ // Extract the piece of the bit-field value to write in this access, limited
+ // to the values that are part of this access.
+ llvm::Value *Val = SrcVal;
+ if (AI.TargetBitOffset)
+ Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
+ Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
+ AI.TargetBitWidth));
+
+ // Extend or truncate to the access size.
+ const llvm::Type *AccessLTy =
+ llvm::Type::getIntNTy(VMContext, AI.AccessWidth);
+ if (ResSizeInBits < AI.AccessWidth)
+ Val = Builder.CreateZExt(Val, AccessLTy);
+ else if (ResSizeInBits > AI.AccessWidth)
+ Val = Builder.CreateTrunc(Val, AccessLTy);
+
+ // Shift into the position in memory.
+ if (AI.FieldBitStart)
+ Val = Builder.CreateShl(Val, AI.FieldBitStart);
+
+ // If necessary, load and OR in bits that are outside of the bit-field.
+ if (AI.TargetBitWidth != AI.AccessWidth) {
+ llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
+ if (AI.AccessAlignment)
+ Load->setAlignment(AI.AccessAlignment);
+
+ // Compute the mask for zeroing the bits that are part of the bit-field.
+ llvm::APInt InvMask =
+ ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
+ AI.FieldBitStart + AI.TargetBitWidth);
+
+ // Apply the mask and OR in to the value to write.
+ Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
+ }
+
+ // Write the value.
+ llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
+ Dst.isVolatileQualified());
+ if (AI.AccessAlignment)
+ Store->setAlignment(AI.AccessAlignment);
+ }
+}
+
+void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
+ LValue Dst,
+ QualType Ty) {
+ EmitObjCPropertySet(Dst.getPropertyRefExpr(), Src);
+}
+
+void CodeGenFunction::EmitStoreThroughKVCRefLValue(RValue Src,
+ LValue Dst,
+ QualType Ty) {
+ EmitObjCPropertySet(Dst.getKVCRefExpr(), Src);
+}
+
+void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
+ LValue Dst,
+ QualType Ty) {
+ // This access turns into a read/modify/write of the vector. Load the input
+ // value now.
+ llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
+ Dst.isVolatileQualified(), "tmp");
+ const llvm::Constant *Elts = Dst.getExtVectorElts();
+
+ llvm::Value *SrcVal = Src.getScalarVal();
+
+ if (const VectorType *VTy = Ty->getAs<VectorType>()) {
+ unsigned NumSrcElts = VTy->getNumElements();
+ unsigned NumDstElts =
+ cast<llvm::VectorType>(Vec->getType())->getNumElements();
+ if (NumDstElts == NumSrcElts) {
+ // Use a shufflevector when the source and destination have the same
+ // number of elements; the mask is built from the store side, mapping
+ // each destination slot to the source element written into it.
+ llvm::SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
+ for (unsigned i = 0; i != NumSrcElts; ++i) {
+ unsigned InIdx = getAccessedFieldNo(i, Elts);
+ Mask[InIdx] = llvm::ConstantInt::get(Int32Ty, i);
+ }
+
+ llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
+ Vec = Builder.CreateShuffleVector(SrcVal,
+ llvm::UndefValue::get(Vec->getType()),
+ MaskV, "tmp");
+ } else if (NumDstElts > NumSrcElts) {
+ // Extend the source vector to the destination's length, then shuffle it
+ // into the destination.
+ // FIXME: since we're shuffling with undef, can we just use the indices
+ // into that? This could be simpler.
+ llvm::SmallVector<llvm::Constant*, 4> ExtMask;
+ unsigned i;
+ for (i = 0; i != NumSrcElts; ++i)
+ ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i));
+ for (; i != NumDstElts; ++i)
+ ExtMask.push_back(llvm::UndefValue::get(Int32Ty));
+ llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0],
+ ExtMask.size());
+ llvm::Value *ExtSrcVal =
+ Builder.CreateShuffleVector(SrcVal,
+ llvm::UndefValue::get(SrcVal->getType()),
+ ExtMaskV, "tmp");
+ // Build an identity mask for the destination vector.
+ llvm::SmallVector<llvm::Constant*, 4> Mask;
+ for (unsigned i = 0; i != NumDstElts; ++i)
+ Mask.push_back(llvm::ConstantInt::get(Int32Ty, i));
+
+ // Then overwrite the destination slots that receive shuffled-in source elements.
+ for (unsigned i = 0; i != NumSrcElts; ++i) {
+ unsigned Idx = getAccessedFieldNo(i, Elts);
+ Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts);
+ }
+ llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
+ Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp");
+ } else {
+ // We should never shorten the vector
+ assert(0 && "unexpected shorten vector length");
+ }
+ } else {
+ // If the Src is a scalar (not a vector) it must be updating one element.
+ unsigned InIdx = getAccessedFieldNo(0, Elts);
+ llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
+ Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp");
+ }
+
+ Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
+}
+
+// setObjCGCLValueClass - Sets the class of the lvalue for the purpose of
+// generating the write-barrier API. It is currently a global, an ivar,
+// or neither.
+static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
+ LValue &LV) {
+ if (Ctx.getLangOptions().getGCMode() == LangOptions::NonGC)
+ return;
+
+ if (isa<ObjCIvarRefExpr>(E)) {
+ LV.setObjCIvar(true);
+ ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
+ LV.setBaseIvarExp(Exp->getBase());
+ LV.setObjCArray(E->getType()->isArrayType());
+ return;
+ }
+
+ if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
+ if ((VD->isBlockVarDecl() && !VD->hasLocalStorage()) ||
+ VD->isFileVarDecl()) {
+ LV.setGlobalObjCRef(true);
+ LV.setThreadLocalRef(VD->isThreadSpecified());
+ }
+ }
+ LV.setObjCArray(E->getType()->isArrayType());
+ return;
+ }
+
+ if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+ return;
+ }
+
+ if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+ if (LV.isObjCIvar()) {
+ // If cast is to a structure pointer, follow gcc's behavior and make it
+ // a non-ivar write-barrier.
+ QualType ExpTy = E->getType();
+ if (ExpTy->isPointerType())
+ ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
+ if (ExpTy->isRecordType())
+ LV.setObjCIvar(false);
+ }
+ return;
+ }
+ if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+ return;
+ }
+
+ if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+ return;
+ }
+
+ if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
+ if (LV.isObjCIvar() && !LV.isObjCArray())
+ // Using array syntax to assign to what an ivar points to is not the
+ // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
+ LV.setObjCIvar(false);
+ else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
+ // Using array syntax to assign to what a global points to is not the
+ // same as assigning to the global itself. {id *G;} G[i] = 0;
+ LV.setGlobalObjCRef(false);
+ return;
+ }
+
+ if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
+ // We don't know if the member is an 'ivar', but this flag is only
+ // consulted in the context of LV.isObjCIvar().
+ LV.setObjCArray(E->getType()->isArrayType());
+ return;
+ }
+}
+
+static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
+ const Expr *E, const VarDecl *VD) {
+ assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
+ "Var decl must have external storage or be a file var decl!");
+
+ llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
+ if (VD->getType()->isReferenceType())
+ V = CGF.Builder.CreateLoad(V, "tmp");
+ unsigned Alignment = CGF.getContext().getDeclAlign(VD).getQuantity();
+ LValue LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
+ setObjCGCLValueClass(CGF.getContext(), E, LV);
+ return LV;
+}
+
+static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
+ const Expr *E, const FunctionDecl *FD) {
+ llvm::Value* V = CGF.CGM.GetAddrOfFunction(FD);
+ if (!FD->hasPrototype()) {
+ if (const FunctionProtoType *Proto =
+ FD->getType()->getAs<FunctionProtoType>()) {
+ // Ugly case: for a K&R-style definition, the type of the definition
+ // isn't the same as the type of a use. Correct for this with a
+ // bitcast.
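+      // For example (illustrative): a K&R-style definition
+      //   int f(x) int x; { return x; }
+      // carries parameter information in its type, while a caller that only
+      // saw 'int f();' expects the no-prototype type 'int ()'.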
+ QualType NoProtoType =
+ CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
+ NoProtoType = CGF.getContext().getPointerType(NoProtoType);
+ V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType), "tmp");
+ }
+ }
+ unsigned Alignment = CGF.getContext().getDeclAlign(FD).getQuantity();
+ return CGF.MakeAddrLValue(V, E->getType(), Alignment);
+}
+
+LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
+ const NamedDecl *ND = E->getDecl();
+  unsigned Alignment = getContext().getDeclAlign(ND).getQuantity();
+
+ if (ND->hasAttr<WeakRefAttr>()) {
+ const ValueDecl* VD = cast<ValueDecl>(ND);
+ llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
+ return MakeAddrLValue(Aliasee, E->getType(), Alignment);
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
+
+ // Check if this is a global variable.
+ if (VD->hasExternalStorage() || VD->isFileVarDecl())
+ return EmitGlobalVarDeclLValue(*this, E, VD);
+
+ bool NonGCable = VD->hasLocalStorage() && !VD->hasAttr<BlocksAttr>();
+
+ llvm::Value *V = LocalDeclMap[VD];
+ if (!V && getContext().getLangOptions().CPlusPlus &&
+ VD->isStaticLocal())
+ V = CGM.getStaticLocalDeclAddress(VD);
+ assert(V && "DeclRefExpr not entered in LocalDeclMap?");
+
+ if (VD->hasAttr<BlocksAttr>()) {
+ V = Builder.CreateStructGEP(V, 1, "forwarding");
+ V = Builder.CreateLoad(V);
+ V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
+ VD->getNameAsString());
+ }
+ if (VD->getType()->isReferenceType())
+ V = Builder.CreateLoad(V, "tmp");
+
+ LValue LV = MakeAddrLValue(V, E->getType(), Alignment);
+ if (NonGCable) {
+ LV.getQuals().removeObjCGCAttr();
+ LV.setNonGC(true);
+ }
+ setObjCGCLValueClass(getContext(), E, LV);
+ return LV;
+ }
+
+ // If we're emitting an instance method as an independent lvalue,
+ // we're actually emitting a member pointer.
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
+ if (MD->isInstance()) {
+ llvm::Value *V = CGM.getCXXABI().EmitMemberPointer(MD);
+ return MakeAddrLValue(V, MD->getType(), Alignment);
+ }
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
+ return EmitFunctionDeclLValue(*this, E, FD);
+
+ // If we're emitting a field as an independent lvalue, we're
+ // actually emitting a member pointer.
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(ND)) {
+ llvm::Value *V = CGM.getCXXABI().EmitMemberPointer(FD);
+ return MakeAddrLValue(V, FD->getType(), Alignment);
+ }
+
+ assert(false && "Unhandled DeclRefExpr");
+
+  // Return an invalid LValue; the assert above ensures this point is never
+  // reached.
+ return LValue();
+}
+
+LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
+ unsigned Alignment =
+    getContext().getDeclAlign(E->getDecl()).getQuantity();
+ return MakeAddrLValue(GetAddrOfBlockDecl(E), E->getType(), Alignment);
+}
+
+LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
+ // __extension__ doesn't affect lvalue-ness.
+ if (E->getOpcode() == UO_Extension)
+ return EmitLValue(E->getSubExpr());
+
+ QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
+ switch (E->getOpcode()) {
+ default: assert(0 && "Unknown unary operator lvalue!");
+ case UO_Deref: {
+ QualType T = E->getSubExpr()->getType()->getPointeeType();
+ assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
+
+ LValue LV = MakeAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
+ LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
+
+ // We should not generate __weak write barrier on indirect reference
+ // of a pointer to object; as in void foo (__weak id *param); *param = 0;
+ // But, we continue to generate __strong write barrier on indirect write
+ // into a pointer to object.
+ if (getContext().getLangOptions().ObjC1 &&
+ getContext().getLangOptions().getGCMode() != LangOptions::NonGC &&
+ LV.isObjCWeak())
+ LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
+ return LV;
+ }
+ case UO_Real:
+ case UO_Imag: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ unsigned Idx = E->getOpcode() == UO_Imag;
+ return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
+ Idx, "idx"),
+ ExprTy);
+ }
+ case UO_PreInc:
+ case UO_PreDec: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ bool isInc = E->getOpcode() == UO_PreInc;
+
+ if (E->getType()->isAnyComplexType())
+ EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
+ else
+ EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
+ return LV;
+ }
+ }
+}
+
+LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
+ return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
+ E->getType());
+}
+
+LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
+ return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
+ E->getType());
+}
+
+
+LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
+ switch (E->getIdentType()) {
+ default:
+ return EmitUnsupportedLValue(E, "predefined expression");
+
+ case PredefinedExpr::Func:
+ case PredefinedExpr::Function:
+ case PredefinedExpr::PrettyFunction: {
+ unsigned Type = E->getIdentType();
+ std::string GlobalVarName;
+
+ switch (Type) {
+ default: assert(0 && "Invalid type");
+ case PredefinedExpr::Func:
+ GlobalVarName = "__func__.";
+ break;
+ case PredefinedExpr::Function:
+ GlobalVarName = "__FUNCTION__.";
+ break;
+ case PredefinedExpr::PrettyFunction:
+ GlobalVarName = "__PRETTY_FUNCTION__.";
+ break;
+ }
+
+ llvm::StringRef FnName = CurFn->getName();
+ if (FnName.startswith("\01"))
+ FnName = FnName.substr(1);
+ GlobalVarName += FnName;
+
+ const Decl *CurDecl = CurCodeDecl;
+ if (CurDecl == 0)
+ CurDecl = getContext().getTranslationUnitDecl();
+
+ std::string FunctionName =
+ PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurDecl);
+
+ llvm::Constant *C =
+ CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
+ return MakeAddrLValue(C, E->getType());
+ }
+ }
+}
+
+llvm::BasicBlock *CodeGenFunction::getTrapBB() {
+ const CodeGenOptions &GCO = CGM.getCodeGenOpts();
+
+  // If we are not optimizing, don't collapse all calls to trap in the
+  // function to the same call; that way, in the debugger they can see which
+  // operation did in fact fail. If we are optimizing, we collapse all calls
+  // to trap down to just one per function to save on code size.
+ if (GCO.OptimizationLevel && TrapBB)
+ return TrapBB;
+
+ llvm::BasicBlock *Cont = 0;
+ if (HaveInsertPoint()) {
+ Cont = createBasicBlock("cont");
+ EmitBranch(Cont);
+ }
+ TrapBB = createBasicBlock("trap");
+ EmitBlock(TrapBB);
+
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap, 0, 0);
+ llvm::CallInst *TrapCall = Builder.CreateCall(F);
+ TrapCall->setDoesNotReturn();
+ TrapCall->setDoesNotThrow();
+ Builder.CreateUnreachable();
+
+ if (Cont)
+ EmitBlock(Cont);
+ return TrapBB;
+}
+
+/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
+/// array to pointer, return the array subexpression.
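+/// For example (illustrative): given 'int A[10];', the base of 'A[i]' is 'A'
+/// wrapped in a CK_ArrayToPointerDecay cast, and 'A' itself is returned.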
+static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
+ // If this isn't just an array->pointer decay, bail out.
+ const CastExpr *CE = dyn_cast<CastExpr>(E);
+ if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay)
+ return 0;
+
+  // If this is a decay from a variable length array, bail out.
+ const Expr *SubExpr = CE->getSubExpr();
+ if (SubExpr->getType()->isVariableArrayType())
+ return 0;
+
+ return SubExpr;
+}
+
+LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
+ // The index must always be an integer, which is not an aggregate. Emit it.
+ llvm::Value *Idx = EmitScalarExpr(E->getIdx());
+ QualType IdxTy = E->getIdx()->getType();
+ bool IdxSigned = IdxTy->isSignedIntegerType();
+
+ // If the base is a vector type, then we are forming a vector element lvalue
+ // with this subscript.
+ if (E->getBase()->getType()->isVectorType()) {
+ // Emit the vector as an lvalue to get its address.
+ LValue LHS = EmitLValue(E->getBase());
+ assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
+    Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
+ return LValue::MakeVectorElt(LHS.getAddress(), Idx,
+ E->getBase()->getType().getCVRQualifiers());
+ }
+
+  // Extend or truncate the index type to 32 or 64 bits.
+ if (!Idx->getType()->isIntegerTy(LLVMPointerWidth))
+ Idx = Builder.CreateIntCast(Idx, IntPtrTy,
+ IdxSigned, "idxprom");
+
+ // FIXME: As llvm implements the object size checking, this can come out.
+ if (CatchUndefined) {
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())){
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
+ if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
+ if (const ConstantArrayType *CAT
+ = getContext().getAsConstantArrayType(DRE->getType())) {
+ llvm::APInt Size = CAT->getSize();
+ llvm::BasicBlock *Cont = createBasicBlock("cont");
+ Builder.CreateCondBr(Builder.CreateICmpULE(Idx,
+ llvm::ConstantInt::get(Idx->getType(), Size)),
+ Cont, getTrapBB());
+ EmitBlock(Cont);
+ }
+ }
+ }
+ }
+ }
+
+  // We know that the pointer points to a type of the correct size, unless the
+  // type is a VLA or an Objective-C interface.
+ llvm::Value *Address = 0;
+ if (const VariableArrayType *VAT =
+ getContext().getAsVariableArrayType(E->getType())) {
+ llvm::Value *VLASize = GetVLASize(VAT);
+
+ Idx = Builder.CreateMul(Idx, VLASize);
+
+ QualType BaseType = getContext().getBaseElementType(VAT);
+
+ CharUnits BaseTypeSize = getContext().getTypeSizeInChars(BaseType);
+ Idx = Builder.CreateUDiv(Idx,
+ llvm::ConstantInt::get(Idx->getType(),
+ BaseTypeSize.getQuantity()));
+
+ // The base must be a pointer, which is not an aggregate. Emit it.
+ llvm::Value *Base = EmitScalarExpr(E->getBase());
+
+ Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
+ } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
+ // Indexing over an interface, as in "NSString *P; P[4];"
+ llvm::Value *InterfaceSize =
+ llvm::ConstantInt::get(Idx->getType(),
+ getContext().getTypeSizeInChars(OIT).getQuantity());
+
+ Idx = Builder.CreateMul(Idx, InterfaceSize);
+
+ const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
+
+ // The base must be a pointer, which is not an aggregate. Emit it.
+ llvm::Value *Base = EmitScalarExpr(E->getBase());
+ Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy),
+ Idx, "arrayidx");
+ Address = Builder.CreateBitCast(Address, Base->getType());
+ } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
+ // If this is A[i] where A is an array, the frontend will have decayed the
+ // base to be a ArrayToPointerDecay implicit cast. While correct, it is
+ // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
+ // "gep x, i" here. Emit one "gep A, 0, i".
+ assert(Array->getType()->isArrayType() &&
+ "Array to pointer decay must have array source type!");
+ llvm::Value *ArrayPtr = EmitLValue(Array).getAddress();
+ llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
+ llvm::Value *Args[] = { Zero, Idx };
+
+ Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, Args+2, "arrayidx");
+ } else {
+ // The base must be a pointer, which is not an aggregate. Emit it.
+ llvm::Value *Base = EmitScalarExpr(E->getBase());
+ Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
+ }
+
+ QualType T = E->getBase()->getType()->getPointeeType();
+ assert(!T.isNull() &&
+ "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
+
+ LValue LV = MakeAddrLValue(Address, T);
+ LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
+
+ if (getContext().getLangOptions().ObjC1 &&
+ getContext().getLangOptions().getGCMode() != LangOptions::NonGC) {
+ LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
+ setObjCGCLValueClass(getContext(), E, LV);
+ }
+ return LV;
+}
+
+static
+llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
+ llvm::SmallVector<unsigned, 4> &Elts) {
+ llvm::SmallVector<llvm::Constant*, 4> CElts;
+
+ const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
+ for (unsigned i = 0, e = Elts.size(); i != e; ++i)
+ CElts.push_back(llvm::ConstantInt::get(Int32Ty, Elts[i]));
+
+ return llvm::ConstantVector::get(&CElts[0], CElts.size());
+}
+
+LValue CodeGenFunction::
+EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
+ // Emit the base vector as an l-value.
+ LValue Base;
+
+ // ExtVectorElementExpr's base can either be a vector or pointer to vector.
+ if (E->isArrow()) {
+ // If it is a pointer to a vector, emit the address and form an lvalue with
+ // it.
+ llvm::Value *Ptr = EmitScalarExpr(E->getBase());
+ const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
+ Base = MakeAddrLValue(Ptr, PT->getPointeeType());
+ Base.getQuals().removeObjCGCAttr();
+ } else if (E->getBase()->isLvalue(getContext()) == Expr::LV_Valid) {
+    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
+ // emit the base as an lvalue.
+ assert(E->getBase()->getType()->isVectorType());
+ Base = EmitLValue(E->getBase());
+ } else {
+ // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
+ assert(E->getBase()->getType()->getAs<VectorType>() &&
+ "Result must be a vector");
+ llvm::Value *Vec = EmitScalarExpr(E->getBase());
+
+ // Store the vector to memory (because LValue wants an address).
+ llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
+ Builder.CreateStore(Vec, VecMem);
+ Base = MakeAddrLValue(VecMem, E->getBase()->getType());
+ }
+
+ // Encode the element access list into a vector of unsigned indices.
+ llvm::SmallVector<unsigned, 4> Indices;
+ E->getEncodedElementAccess(Indices);
+
+ if (Base.isSimple()) {
+ llvm::Constant *CV = GenerateConstantVector(VMContext, Indices);
+ return LValue::MakeExtVectorElt(Base.getAddress(), CV,
+ Base.getVRQualifiers());
+ }
+ assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
+
+ llvm::Constant *BaseElts = Base.getExtVectorElts();
+ llvm::SmallVector<llvm::Constant *, 4> CElts;
+
+ for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
+ if (isa<llvm::ConstantAggregateZero>(BaseElts))
+ CElts.push_back(llvm::ConstantInt::get(Int32Ty, 0));
+ else
+ CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i])));
+ }
+ llvm::Constant *CV = llvm::ConstantVector::get(&CElts[0], CElts.size());
+ return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV,
+ Base.getVRQualifiers());
+}
+
+LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
+ bool isNonGC = false;
+ Expr *BaseExpr = E->getBase();
+ llvm::Value *BaseValue = NULL;
+ Qualifiers BaseQuals;
+
+ // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
+ if (E->isArrow()) {
+ BaseValue = EmitScalarExpr(BaseExpr);
+ const PointerType *PTy =
+ BaseExpr->getType()->getAs<PointerType>();
+ BaseQuals = PTy->getPointeeType().getQualifiers();
+ } else if (isa<ObjCPropertyRefExpr>(BaseExpr->IgnoreParens()) ||
+ isa<ObjCImplicitSetterGetterRefExpr>(
+ BaseExpr->IgnoreParens())) {
+ RValue RV = EmitObjCPropertyGet(BaseExpr);
+ BaseValue = RV.getAggregateAddr();
+ BaseQuals = BaseExpr->getType().getQualifiers();
+ } else {
+ LValue BaseLV = EmitLValue(BaseExpr);
+ if (BaseLV.isNonGC())
+ isNonGC = true;
+ // FIXME: this isn't right for bitfields.
+ BaseValue = BaseLV.getAddress();
+ QualType BaseTy = BaseExpr->getType();
+ BaseQuals = BaseTy.getQualifiers();
+ }
+
+ NamedDecl *ND = E->getMemberDecl();
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
+ LValue LV = EmitLValueForField(BaseValue, Field,
+ BaseQuals.getCVRQualifiers());
+ LV.setNonGC(isNonGC);
+ setObjCGCLValueClass(getContext(), E, LV);
+ return LV;
+ }
+
+ if (VarDecl *VD = dyn_cast<VarDecl>(ND))
+ return EmitGlobalVarDeclLValue(*this, E, VD);
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
+ return EmitFunctionDeclLValue(*this, E, FD);
+
+ assert(false && "Unhandled member declaration!");
+ return LValue();
+}
+
+LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
+ const FieldDecl* Field,
+ unsigned CVRQualifiers) {
+ const CGRecordLayout &RL =
+ CGM.getTypes().getCGRecordLayout(Field->getParent());
+ const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
+ return LValue::MakeBitfield(BaseValue, Info,
+ Field->getType().getCVRQualifiers()|CVRQualifiers);
+}
+
+/// EmitLValueForAnonRecordField - Given that the field is a member of
+/// an anonymous struct or union buried inside a record, and given
+/// that the base value is a pointer to the enclosing record, derive
+/// an lvalue for the ultimate field.
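+/// For example (illustrative): in
+///   struct S { struct { int x; }; } s;
+/// the access 's.x' names a field of the anonymous struct, and the path from
+/// S down to 'x' is walked one enclosing record at a time.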
+LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue,
+ const FieldDecl *Field,
+ unsigned CVRQualifiers) {
+ llvm::SmallVector<const FieldDecl *, 8> Path;
+ Path.push_back(Field);
+
+ while (Field->getParent()->isAnonymousStructOrUnion()) {
+ const ValueDecl *VD = Field->getParent()->getAnonymousStructOrUnionObject();
+ if (!isa<FieldDecl>(VD)) break;
+ Field = cast<FieldDecl>(VD);
+ Path.push_back(Field);
+ }
+
+ llvm::SmallVectorImpl<const FieldDecl*>::reverse_iterator
+ I = Path.rbegin(), E = Path.rend();
+ while (true) {
+ LValue LV = EmitLValueForField(BaseValue, *I, CVRQualifiers);
+ if (++I == E) return LV;
+
+ assert(LV.isSimple());
+ BaseValue = LV.getAddress();
+ CVRQualifiers |= LV.getVRQualifiers();
+ }
+}
+
+LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
+ const FieldDecl* Field,
+ unsigned CVRQualifiers) {
+ if (Field->isBitField())
+ return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers);
+
+ const CGRecordLayout &RL =
+ CGM.getTypes().getCGRecordLayout(Field->getParent());
+ unsigned idx = RL.getLLVMFieldNo(Field);
+ llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");
+
+ // Match union field type.
+ if (Field->getParent()->isUnion()) {
+ const llvm::Type *FieldTy =
+ CGM.getTypes().ConvertTypeForMem(Field->getType());
+ const llvm::PointerType * BaseTy =
+ cast<llvm::PointerType>(BaseValue->getType());
+ unsigned AS = BaseTy->getAddressSpace();
+ V = Builder.CreateBitCast(V,
+ llvm::PointerType::get(FieldTy, AS),
+ "tmp");
+ }
+ if (Field->getType()->isReferenceType())
+ V = Builder.CreateLoad(V, "tmp");
+
+ unsigned Alignment = getContext().getDeclAlign(Field).getQuantity();
+ LValue LV = MakeAddrLValue(V, Field->getType(), Alignment);
+ LV.getQuals().addCVRQualifiers(CVRQualifiers);
+
+ // __weak attribute on a field is ignored.
+ if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
+ LV.getQuals().removeObjCGCAttr();
+
+ return LV;
+}
+
+LValue
+CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value* BaseValue,
+ const FieldDecl* Field,
+ unsigned CVRQualifiers) {
+ QualType FieldType = Field->getType();
+
+ if (!FieldType->isReferenceType())
+ return EmitLValueForField(BaseValue, Field, CVRQualifiers);
+
+ const CGRecordLayout &RL =
+ CGM.getTypes().getCGRecordLayout(Field->getParent());
+ unsigned idx = RL.getLLVMFieldNo(Field);
+ llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");
+
+ assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
+
+ unsigned Alignment = getContext().getDeclAlign(Field).getQuantity();
+ return MakeAddrLValue(V, FieldType, Alignment);
+}
+
+LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){
+ llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
+ const Expr* InitExpr = E->getInitializer();
+ LValue Result = MakeAddrLValue(DeclPtr, E->getType());
+
+ EmitAnyExprToMem(InitExpr, DeclPtr, /*Volatile*/ false);
+
+ return Result;
+}
+
+LValue
+CodeGenFunction::EmitConditionalOperatorLValue(const ConditionalOperator* E) {
+ if (E->isLvalue(getContext()) == Expr::LV_Valid) {
+ if (int Cond = ConstantFoldsToSimpleInteger(E->getCond())) {
+ Expr *Live = Cond == 1 ? E->getLHS() : E->getRHS();
+ if (Live)
+ return EmitLValue(Live);
+ }
+
+ if (!E->getLHS())
+ return EmitUnsupportedLValue(E, "conditional operator with missing LHS");
+
+ llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = createBasicBlock("cond.end");
+
+ EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+
+ // Any temporaries created here are conditional.
+ BeginConditionalBranch();
+ EmitBlock(LHSBlock);
+ LValue LHS = EmitLValue(E->getLHS());
+ EndConditionalBranch();
+
+ if (!LHS.isSimple())
+ return EmitUnsupportedLValue(E, "conditional operator");
+
+ // FIXME: We shouldn't need an alloca for this.
+ llvm::Value *Temp = CreateTempAlloca(LHS.getAddress()->getType(),"condtmp");
+ Builder.CreateStore(LHS.getAddress(), Temp);
+ EmitBranch(ContBlock);
+
+ // Any temporaries created here are conditional.
+ BeginConditionalBranch();
+ EmitBlock(RHSBlock);
+ LValue RHS = EmitLValue(E->getRHS());
+ EndConditionalBranch();
+ if (!RHS.isSimple())
+ return EmitUnsupportedLValue(E, "conditional operator");
+
+ Builder.CreateStore(RHS.getAddress(), Temp);
+ EmitBranch(ContBlock);
+
+ EmitBlock(ContBlock);
+
+ Temp = Builder.CreateLoad(Temp, "lv");
+ return MakeAddrLValue(Temp, E->getType());
+ }
+
+ // ?: here should be an aggregate.
+ assert((hasAggregateLLVMType(E->getType()) &&
+ !E->getType()->isAnyComplexType()) &&
+ "Unexpected conditional operator!");
+
+ return EmitAggExprToLValue(E);
+}
+
+/// EmitCastLValue - Casts are never lvalues unless that cast is a dynamic_cast.
+/// If the cast is a dynamic_cast, we can have the usual lvalue result,
+/// otherwise if a cast is needed by the code generator in an lvalue context,
+/// then it must mean that we need the address of an aggregate in order to
+/// access one of its fields. This can happen for all the reasons that casts
+/// are permitted with aggregate result, including noop aggregate casts, and
+/// cast from scalar to union.
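+/// For example (illustrative): the GNU cast-to-union extension,
+/// '(union U)someInt', produces an aggregate whose address may be needed here.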
+LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ case CK_ToVoid:
+ return EmitUnsupportedLValue(E, "unexpected cast lvalue");
+
+ case CK_NoOp:
+ if (E->getSubExpr()->Classify(getContext()).getKind()
+ != Expr::Classification::CL_PRValue) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ if (LV.isPropertyRef() || LV.isKVCRef()) {
+ QualType QT = E->getSubExpr()->getType();
+ RValue RV =
+ LV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(LV, QT)
+ : EmitLoadOfKVCRefLValue(LV, QT);
+ assert(!RV.isScalar() && "EmitCastLValue-scalar cast of property ref");
+ llvm::Value *V = RV.getAggregateAddr();
+ return MakeAddrLValue(V, QT);
+ }
+ return LV;
+ }
+ // Fall through to synthesize a temporary.
+
+ case CK_Unknown:
+ case CK_BitCast:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToMemberPointer:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingCast:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_AnyPointerToBlockPointerCast: {
+ // These casts only produce lvalues when we're binding a reference to a
+ // temporary realized from a (converted) pure rvalue. Emit the expression
+ // as a value, copy it into a temporary, and return an lvalue referring to
+ // that temporary.
+ llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
+ EmitAnyExprToMem(E, V, false, false);
+ return MakeAddrLValue(V, E->getType());
+ }
+
+ case CK_Dynamic: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ llvm::Value *V = LV.getAddress();
+ const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
+ return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType());
+ }
+
+ case CK_ConstructorConversion:
+ case CK_UserDefinedConversion:
+ case CK_AnyPointerToObjCPointerCast:
+ return EmitLValue(E->getSubExpr());
+
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBase: {
+ const RecordType *DerivedClassTy =
+ E->getSubExpr()->getType()->getAs<RecordType>();
+ CXXRecordDecl *DerivedClassDecl =
+ cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+
+ LValue LV = EmitLValue(E->getSubExpr());
+ llvm::Value *This;
+ if (LV.isPropertyRef() || LV.isKVCRef()) {
+ QualType QT = E->getSubExpr()->getType();
+ RValue RV =
+ LV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(LV, QT)
+ : EmitLoadOfKVCRefLValue(LV, QT);
+ assert (!RV.isScalar() && "EmitCastLValue");
+ This = RV.getAggregateAddr();
+ }
+ else
+ This = LV.getAddress();
+
+ // Perform the derived-to-base conversion
+ llvm::Value *Base =
+ GetAddressOfBaseClass(This, DerivedClassDecl,
+ E->path_begin(), E->path_end(),
+ /*NullCheckValue=*/false);
+
+ return MakeAddrLValue(Base, E->getType());
+ }
+ case CK_ToUnion:
+ return EmitAggExprToLValue(E);
+ case CK_BaseToDerived: {
+ const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
+ CXXRecordDecl *DerivedClassDecl =
+ cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+
+ LValue LV = EmitLValue(E->getSubExpr());
+
+ // Perform the base-to-derived conversion
+ llvm::Value *Derived =
+ GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
+ E->path_begin(), E->path_end(),
+ /*NullCheckValue=*/false);
+
+ return MakeAddrLValue(Derived, E->getType());
+ }
+ case CK_LValueBitCast: {
+ // This must be a reinterpret_cast (or c-style equivalent).
+ const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);
+
+ LValue LV = EmitLValue(E->getSubExpr());
+ llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
+ ConvertType(CE->getTypeAsWritten()));
+ return MakeAddrLValue(V, E->getType());
+ }
+ case CK_ObjCObjectLValueCast: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ QualType ToType = getContext().getLValueReferenceType(E->getType());
+ llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
+ ConvertType(ToType));
+ return MakeAddrLValue(V, E->getType());
+ }
+ }
+
+ llvm_unreachable("Unhandled lvalue cast kind?");
+}
+
+LValue CodeGenFunction::EmitNullInitializationLValue(
+ const CXXScalarValueInitExpr *E) {
+ QualType Ty = E->getType();
+ LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty);
+ EmitNullInitialization(LV.getAddress(), Ty);
+ return LV;
+}
+
+//===--------------------------------------------------------------------===//
+// Expression Emission
+//===--------------------------------------------------------------------===//
+
+
+RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ // Builtins never have block type.
+ if (E->getCallee()->getType()->isBlockPointerType())
+ return EmitBlockCallExpr(E, ReturnValue);
+
+ if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
+ return EmitCXXMemberCallExpr(CE, ReturnValue);
+
+ const Decl *TargetDecl = 0;
+ if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
+ TargetDecl = DRE->getDecl();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(TargetDecl))
+ if (unsigned builtinID = FD->getBuiltinID())
+ return EmitBuiltinExpr(FD, builtinID, E);
+ }
+ }
+
+ if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
+ if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
+ return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
+
+ if (isa<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
+ // C++ [expr.pseudo]p1:
+ // The result shall only be used as the operand for the function call
+ // operator (), and the result of such a call has type void. The only
+ // effect is the evaluation of the postfix-expression before the dot or
+ // arrow.
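+    // For example (illustrative): given 'typedef int I; int *p;', the call
+    // 'p->~I()' only evaluates 'p' and produces no value.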
+ EmitScalarExpr(E->getCallee());
+ return RValue::get(0);
+ }
+
+ llvm::Value *Callee = EmitScalarExpr(E->getCallee());
+ return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
+ E->arg_begin(), E->arg_end(), TargetDecl);
+}
+
+LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
+ // Comma expressions just emit their LHS then their RHS as an l-value.
+ if (E->getOpcode() == BO_Comma) {
+ EmitAnyExpr(E->getLHS());
+ EnsureInsertPoint();
+ return EmitLValue(E->getRHS());
+ }
+
+ if (E->getOpcode() == BO_PtrMemD ||
+ E->getOpcode() == BO_PtrMemI)
+ return EmitPointerToDataMemberBinaryExpr(E);
+
+ // Can only get l-value for binary operator expressions which are a
+ // simple assignment of aggregate type.
+ if (E->getOpcode() != BO_Assign)
+ return EmitUnsupportedLValue(E, "binary l-value expression");
+
+ if (!hasAggregateLLVMType(E->getType())) {
+ // Emit the LHS as an l-value.
+ LValue LV = EmitLValue(E->getLHS());
+ // Store the value through the l-value.
+ EmitStoreThroughLValue(EmitAnyExpr(E->getRHS()), LV, E->getType());
+ return LV;
+ }
+
+ return EmitAggExprToLValue(E);
+}
+
+LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
+ RValue RV = EmitCallExpr(E);
+
+ if (!RV.isScalar())
+ return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
+
+ assert(E->getCallReturnType()->isReferenceType() &&
+ "Can't have a scalar return unless the return type is a "
+ "reference type!");
+
+ return MakeAddrLValue(RV.getScalarVal(), E->getType());
+}
+
+LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
+ // FIXME: This shouldn't require another copy.
+ return EmitAggExprToLValue(E);
+}
+
+LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
+ llvm::Value *Temp = CreateMemTemp(E->getType(), "tmp");
+ EmitCXXConstructExpr(Temp, E);
+ return MakeAddrLValue(Temp, E->getType());
+}
+
+LValue
+CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
+ return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
+}
+
+LValue
+CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ EmitCXXTemporary(E->getTemporary(), LV.getAddress());
+ return LV;
+}
+
+LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
+ RValue RV = EmitObjCMessageExpr(E);
+
+ if (!RV.isScalar())
+ return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
+
+ assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
+ "Can't have a scalar return unless the return type is a "
+ "reference type!");
+
+ return MakeAddrLValue(RV.getScalarVal(), E->getType());
+}
+
+LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
+ llvm::Value *V =
+ CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true);
+ return MakeAddrLValue(V, E->getType());
+}
+
+llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
+}
+
+LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
+ Ivar, CVRQualifiers);
+}
+
+LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
+ // FIXME: A lot of the code below could be shared with EmitMemberExpr.
+ llvm::Value *BaseValue = 0;
+ const Expr *BaseExpr = E->getBase();
+ Qualifiers BaseQuals;
+ QualType ObjectTy;
+ if (E->isArrow()) {
+ BaseValue = EmitScalarExpr(BaseExpr);
+ ObjectTy = BaseExpr->getType()->getPointeeType();
+ BaseQuals = ObjectTy.getQualifiers();
+ } else {
+ LValue BaseLV = EmitLValue(BaseExpr);
+ // FIXME: this isn't right for bitfields.
+ BaseValue = BaseLV.getAddress();
+ ObjectTy = BaseExpr->getType();
+ BaseQuals = ObjectTy.getQualifiers();
+ }
+
+ LValue LV =
+ EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
+ BaseQuals.getCVRQualifiers());
+ setObjCGCLValueClass(getContext(), E, LV);
+ return LV;
+}
+
+LValue
+CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
+ // This is a special l-value that just issues sends when we load or store
+ // through it.
+ return LValue::MakePropertyRef(E, E->getType().getCVRQualifiers());
+}
+
+LValue CodeGenFunction::EmitObjCKVCRefLValue(
+ const ObjCImplicitSetterGetterRefExpr *E) {
+ // This is a special l-value that just issues sends when we load or store
+ // through it.
+ return LValue::MakeKVCRef(E, E->getType().getCVRQualifiers());
+}
+
+LValue CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) {
+ return EmitUnsupportedLValue(E, "use of super");
+}
+
+LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
+  // Can only get an l-value for a statement expression returning an
+  // aggregate type.
+ RValue RV = EmitAnyExprToTemp(E);
+ return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
+}
+
+RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd,
+ const Decl *TargetDecl) {
+ // Get the actual function type. The callee type will always be a pointer to
+ // function type or a block pointer type.
+ assert(CalleeType->isFunctionPointerType() &&
+ "Call must have function pointer type!");
+
+ CalleeType = getContext().getCanonicalType(CalleeType);
+
+ const FunctionType *FnType
+ = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());
+ QualType ResultType = FnType->getResultType();
+
+ CallArgList Args;
+ EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);
+
+ return EmitCall(CGM.getTypes().getFunctionInfo(Args, FnType),
+ Callee, ReturnValue, Args, TargetDecl);
+}
+
+LValue CodeGenFunction::
+EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
+ llvm::Value *BaseV;
+ if (E->getOpcode() == BO_PtrMemI)
+ BaseV = EmitScalarExpr(E->getLHS());
+ else
+ BaseV = EmitLValue(E->getLHS()).getAddress();
+
+ llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
+
+ const MemberPointerType *MPT
+ = E->getRHS()->getType()->getAs<MemberPointerType>();
+
+ llvm::Value *AddV =
+ CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT);
+
+ return MakeAddrLValue(AddV, MPT->getPointeeType());
+}
+
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
new file mode 100644
index 0000000..28c8b35
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
@@ -0,0 +1,776 @@
+//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Aggregate Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGObjCRuntime.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Intrinsics.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Aggregate Expression Emitter
+//===----------------------------------------------------------------------===//
+
+namespace {
+class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+ llvm::Value *DestPtr;
+ bool VolatileDest;
+ bool IgnoreResult;
+ bool IsInitializer;
+ bool RequiresGCollection;
+
+ ReturnValueSlot getReturnValueSlot() const {
+ // If the destination slot requires garbage collection, we can't
+ // use the real return value slot, because we have to use the GC
+ // API.
+ if (RequiresGCollection) return ReturnValueSlot();
+
+ return ReturnValueSlot(DestPtr, VolatileDest);
+ }
+
+public:
+ AggExprEmitter(CodeGenFunction &cgf, llvm::Value *destPtr, bool v,
+ bool ignore, bool isinit, bool requiresGCollection)
+ : CGF(cgf), Builder(CGF.Builder),
+ DestPtr(destPtr), VolatileDest(v), IgnoreResult(ignore),
+ IsInitializer(isinit), RequiresGCollection(requiresGCollection) {
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Utilities
+ //===--------------------------------------------------------------------===//
+
+ /// EmitAggLoadOfLValue - Given an expression with aggregate type that
+  /// represents an lvalue, this method emits the address of the lvalue,
+ /// then loads the result into DestPtr.
+ void EmitAggLoadOfLValue(const Expr *E);
+
+ /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+ void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
+ void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false);
+
+ void EmitGCMove(const Expr *E, RValue Src);
+
+ bool TypeRequiresGCollection(QualType T);
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ void VisitStmt(Stmt *S) {
+ CGF.ErrorUnsupported(S, "aggregate expression");
+ }
+ void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
+ void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
+
+ // l-values.
+ void VisitDeclRefExpr(DeclRefExpr *DRE) { EmitAggLoadOfLValue(DRE); }
+ void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
+ void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
+ void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
+ void VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+ void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+ void VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+ void VisitPredefinedExpr(const PredefinedExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+
+ // Operators.
+ void VisitCastExpr(CastExpr *E);
+ void VisitCallExpr(const CallExpr *E);
+ void VisitStmtExpr(const StmtExpr *E);
+ void VisitBinaryOperator(const BinaryOperator *BO);
+ void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
+ void VisitBinAssign(const BinaryOperator *E);
+ void VisitBinComma(const BinaryOperator *E);
+
+ void VisitObjCMessageExpr(ObjCMessageExpr *E);
+ void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+ void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);
+ void VisitObjCImplicitSetterGetterRefExpr(ObjCImplicitSetterGetterRefExpr *E);
+
+ void VisitConditionalOperator(const ConditionalOperator *CO);
+ void VisitChooseExpr(const ChooseExpr *CE);
+ void VisitInitListExpr(InitListExpr *E);
+ void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
+ void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ Visit(DAE->getExpr());
+ }
+ void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
+ void VisitCXXConstructExpr(const CXXConstructExpr *E);
+ void VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E);
+ void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
+ void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
+
+ void VisitVAArgExpr(VAArgExpr *E);
+
+ void EmitInitializationToLValue(Expr *E, LValue Address, QualType T);
+ void EmitNullInitializationToLValue(LValue Address, QualType T);
+ // case Expr::ChooseExprClass:
+ void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitAggLoadOfLValue - Given an expression with aggregate type that
+/// represents an lvalue, this method emits the address of the lvalue,
+/// then loads the result into DestPtr.
+void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
+ LValue LV = CGF.EmitLValue(E);
+ EmitFinalDestCopy(E, LV);
+}
+
+/// \brief True if the given aggregate type requires special GC API calls.
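+/// For example (illustrative): under -fobjc-gc, a C struct with an 'id'
+/// member has an object member and must be copied with the collector-aware
+/// memmove API.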
+bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
+ // Only record types have members that might require garbage collection.
+ const RecordType *RecordTy = T->getAs<RecordType>();
+ if (!RecordTy) return false;
+
+ // Don't mess with non-trivial C++ types.
+ RecordDecl *Record = RecordTy->getDecl();
+ if (isa<CXXRecordDecl>(Record) &&
+ (!cast<CXXRecordDecl>(Record)->hasTrivialCopyConstructor() ||
+ !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
+ return false;
+
+ // Check whether the type has an object member.
+ return Record->hasObjectMember();
+}
+
+/// \brief Perform the final move to DestPtr if RequiresGCollection is set.
+///
+/// The idea is that you do something like this:
+/// RValue Result = EmitSomething(..., getReturnValueSlot());
+/// EmitGCMove(E, Result);
+/// If GC doesn't interfere, this will cause the result to be emitted
+/// directly into the return value slot. If GC does interfere, a final
+/// move will be performed.
+void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) {
+ if (RequiresGCollection) {
+ std::pair<uint64_t, unsigned> TypeInfo =
+ CGF.getContext().getTypeInfo(E->getType());
+ unsigned long size = TypeInfo.first/8;
+ const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
+ llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
+ CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, DestPtr,
+ Src.getAggregateAddr(),
+ SizeVal);
+ }
+}
+
+/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
+ assert(Src.isAggregate() && "value must be aggregate value!");
+
+ // If DestPtr is null, then we're evaluating an aggregate expression
+ // in a context (like an expression statement) that doesn't care
+ // about the result. C says that an lvalue-to-rvalue conversion is
+ // performed in these cases; C++ says that it is not. In either
+ // case, we don't actually need to do anything unless the value is
+ // volatile.
+ if (DestPtr == 0) {
+ if (!Src.isVolatileQualified() ||
+ CGF.CGM.getLangOptions().CPlusPlus ||
+ (IgnoreResult && Ignore))
+ return;
+
+ // If the source is volatile, we must read from it; to do that, we need
+ // some place to put it.
+ DestPtr = CGF.CreateMemTemp(E->getType(), "agg.tmp");
+ }
+
+ if (RequiresGCollection) {
+ std::pair<uint64_t, unsigned> TypeInfo =
+ CGF.getContext().getTypeInfo(E->getType());
+ unsigned long size = TypeInfo.first/8;
+ const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
+ llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
+ CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
+ DestPtr, Src.getAggregateAddr(),
+ SizeVal);
+ return;
+ }
+ // If the result of the assignment is used, copy the LHS there also.
+  // FIXME: Pass VolatileDest as well. We probably also need to merge volatile
+  // from the source, since we can't eliminate the copy if either operand is
+  // volatile, unless the copy is volatile for both source and destination.
+ CGF.EmitAggregateCopy(DestPtr, Src.getAggregateAddr(), E->getType(),
+ VolatileDest|Src.isVolatileQualified());
+}
+
+/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
+ assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");
+
+ EmitFinalDestCopy(E, RValue::getAggregate(Src.getAddress(),
+ Src.isVolatileQualified()),
+ Ignore);
+}
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+void AggExprEmitter::VisitCastExpr(CastExpr *E) {
+ if (!DestPtr && E->getCastKind() != CK_Dynamic) {
+ Visit(E->getSubExpr());
+ return;
+ }
+
+ switch (E->getCastKind()) {
+ default: assert(0 && "Unhandled cast kind!");
+
+ case CK_Dynamic: {
+ assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
+ LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
+ // FIXME: Do we also need to handle property references here?
+ if (LV.isSimple())
+ CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
+ else
+ CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
+
+ if (DestPtr)
+ CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
+ break;
+ }
+
+ case CK_ToUnion: {
+ // GCC union extension
+ QualType Ty = E->getSubExpr()->getType();
+ QualType PtrTy = CGF.getContext().getPointerType(Ty);
+ llvm::Value *CastPtr = Builder.CreateBitCast(DestPtr,
+ CGF.ConvertType(PtrTy));
+ EmitInitializationToLValue(E->getSubExpr(), CGF.MakeAddrLValue(CastPtr, Ty),
+ Ty);
+ break;
+ }
+
+ case CK_DerivedToBase:
+ case CK_BaseToDerived:
+ case CK_UncheckedDerivedToBase: {
+ assert(0 && "cannot perform hierarchy conversion in EmitAggExpr: "
+ "should have been unpacked before we got here");
+ break;
+ }
+
+ // FIXME: Remove the CK_Unknown check here.
+ case CK_Unknown:
+ case CK_NoOp:
+ case CK_UserDefinedConversion:
+ case CK_ConstructorConversion:
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
+ E->getType()) &&
+ "Implicit cast types must be compatible");
+ Visit(E->getSubExpr());
+ break;
+
+ case CK_LValueBitCast:
+ llvm_unreachable("there are no lvalue bit-casts on aggregates");
+ break;
+ }
+}
+
+void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
+ if (E->getCallReturnType()->isReferenceType()) {
+ EmitAggLoadOfLValue(E);
+ return;
+ }
+
+ RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
+ EmitGCMove(E, RV);
+}
+
+void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
+ EmitGCMove(E, RV);
+}
+
+void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+ RValue RV = CGF.EmitObjCPropertyGet(E, getReturnValueSlot());
+ EmitGCMove(E, RV);
+}
+
+void AggExprEmitter::VisitObjCImplicitSetterGetterRefExpr(
+ ObjCImplicitSetterGetterRefExpr *E) {
+ RValue RV = CGF.EmitObjCPropertyGet(E, getReturnValueSlot());
+ EmitGCMove(E, RV);
+}
+
+void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
+ CGF.EmitAnyExpr(E->getLHS(), 0, false, true);
+ CGF.EmitAggExpr(E->getRHS(), DestPtr, VolatileDest,
+ /*IgnoreResult=*/false, IsInitializer);
+}
+
+void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ CGF.EmitCompoundStmt(*E->getSubStmt(), true, DestPtr, VolatileDest);
+}
+
+void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
+ VisitPointerToDataMemberBinaryOperator(E);
+ else
+ CGF.ErrorUnsupported(E, "aggregate binary expression");
+}
+
+void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
+ const BinaryOperator *E) {
+ LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
+ EmitFinalDestCopy(E, LV);
+}
+
+void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ // For an assignment to work, the value on the right has
+ // to be compatible with the value on the left.
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
+ E->getRHS()->getType())
+ && "Invalid assignment");
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+
+ // We have to special case property setters, otherwise we must have
+ // a simple lvalue (no aggregates inside vectors, bitfields).
+ if (LHS.isPropertyRef()) {
+ llvm::Value *AggLoc = DestPtr;
+ if (!AggLoc)
+ AggLoc = CGF.CreateMemTemp(E->getRHS()->getType());
+ CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
+ CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(),
+ RValue::getAggregate(AggLoc, VolatileDest));
+ } else if (LHS.isKVCRef()) {
+ llvm::Value *AggLoc = DestPtr;
+ if (!AggLoc)
+ AggLoc = CGF.CreateMemTemp(E->getRHS()->getType());
+ CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
+ CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(),
+ RValue::getAggregate(AggLoc, VolatileDest));
+ } else {
+ bool RequiresGCollection = false;
+ if (CGF.getContext().getLangOptions().getGCMode())
+ RequiresGCollection = TypeRequiresGCollection(E->getLHS()->getType());
+
+ // Codegen the RHS so that it stores directly into the LHS.
+ CGF.EmitAggExpr(E->getRHS(), LHS.getAddress(), LHS.isVolatileQualified(),
+ false, false, RequiresGCollection);
+ EmitFinalDestCopy(E, LHS, true);
+ }
+}
+
+void AggExprEmitter::VisitConditionalOperator(const ConditionalOperator *E) {
+ if (!E->getLHS()) {
+ CGF.ErrorUnsupported(E, "conditional operator with missing LHS");
+ return;
+ }
+
+ llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+
+ CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+
+ CGF.BeginConditionalBranch();
+ CGF.EmitBlock(LHSBlock);
+
+  // The GNU extension for a missing LHS was rejected above, so one must exist.
+ assert(E->getLHS() && "Must have LHS for aggregate value");
+
+ Visit(E->getLHS());
+ CGF.EndConditionalBranch();
+ CGF.EmitBranch(ContBlock);
+
+ CGF.BeginConditionalBranch();
+ CGF.EmitBlock(RHSBlock);
+
+ Visit(E->getRHS());
+ CGF.EndConditionalBranch();
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(ContBlock);
+}
+
+void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
+ Visit(CE->getChosenSubExpr(CGF.getContext()));
+}
+
+void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
+ llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
+ llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
+
+ if (!ArgPtr) {
+ CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
+ return;
+ }
+
+ EmitFinalDestCopy(VE, CGF.MakeAddrLValue(ArgPtr, VE->getType()));
+}
+
+void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ llvm::Value *Val = DestPtr;
+
+ if (!Val) {
+ // Create a temporary variable.
+ Val = CGF.CreateMemTemp(E->getType(), "tmp");
+
+ // FIXME: volatile
+ CGF.EmitAggExpr(E->getSubExpr(), Val, false);
+ } else
+ Visit(E->getSubExpr());
+
+ // Don't make this a live temporary if we're emitting an initializer expr.
+ if (!IsInitializer)
+ CGF.EmitCXXTemporary(E->getTemporary(), Val);
+}
+
+void
+AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
+ llvm::Value *Val = DestPtr;
+
+ if (!Val) // Create a temporary variable.
+ Val = CGF.CreateMemTemp(E->getType(), "tmp");
+
+ CGF.EmitCXXConstructExpr(Val, E);
+}
+
+void AggExprEmitter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
+ llvm::Value *Val = DestPtr;
+
+ CGF.EmitCXXExprWithTemporaries(E, Val, VolatileDest, IsInitializer);
+}
+
+void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
+ llvm::Value *Val = DestPtr;
+
+ if (!Val) {
+ // Create a temporary variable.
+ Val = CGF.CreateMemTemp(E->getType(), "tmp");
+ }
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Val, E->getType()),
+ E->getType());
+}
+
+void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+ llvm::Value *Val = DestPtr;
+
+ if (!Val) {
+ // Create a temporary variable.
+ Val = CGF.CreateMemTemp(E->getType(), "tmp");
+ }
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Val, E->getType()),
+ E->getType());
+}
+
+void
+AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV, QualType T) {
+ // FIXME: Ignore result?
+ // FIXME: Are initializers affected by volatile?
+ if (isa<ImplicitValueInitExpr>(E)) {
+ EmitNullInitializationToLValue(LV, T);
+ } else if (T->isReferenceType()) {
+ RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
+ CGF.EmitStoreThroughLValue(RV, LV, T);
+ } else if (T->isAnyComplexType()) {
+ CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
+ } else if (CGF.hasAggregateLLVMType(T)) {
+ CGF.EmitAnyExpr(E, LV.getAddress(), false);
+ } else {
+ CGF.EmitStoreThroughLValue(CGF.EmitAnyExpr(E), LV, T);
+ }
+}
+
+void AggExprEmitter::EmitNullInitializationToLValue(LValue LV, QualType T) {
+ if (!CGF.hasAggregateLLVMType(T)) {
+ // For non-aggregates, we can store zero
+ llvm::Value *Null = llvm::Constant::getNullValue(CGF.ConvertType(T));
+ CGF.EmitStoreThroughLValue(RValue::get(Null), LV, T);
+ } else {
+ // There's a potential optimization opportunity in combining
+ // memsets; that would be easy for arrays, but relatively
+ // difficult for structures with the current code.
+ CGF.EmitNullInitialization(LV.getAddress(), T);
+ }
+}
+
+void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
+#if 0
+ // FIXME: Assess perf here? Figure out what cases are worth optimizing here
+ // (Length of globals? Chunks of zeroed-out space?).
+ //
+ // If we can, prefer a copy from a global; this is a lot less code for long
+ // globals, and it's easier for the current optimizers to analyze.
+ if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
+ llvm::GlobalVariable* GV =
+ new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
+ llvm::GlobalValue::InternalLinkage, C, "");
+ EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
+ return;
+ }
+#endif
+ if (E->hadArrayRangeDesignator()) {
+ CGF.ErrorUnsupported(E, "GNU array range designator extension");
+ }
+
+ // Handle initialization of an array.
+ if (E->getType()->isArrayType()) {
+ const llvm::PointerType *APType =
+ cast<llvm::PointerType>(DestPtr->getType());
+ const llvm::ArrayType *AType =
+ cast<llvm::ArrayType>(APType->getElementType());
+
+ uint64_t NumInitElements = E->getNumInits();
+
+ if (E->getNumInits() > 0) {
+ QualType T1 = E->getType();
+ QualType T2 = E->getInit(0)->getType();
+ if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
+ EmitAggLoadOfLValue(E->getInit(0));
+ return;
+ }
+ }
+
+ uint64_t NumArrayElements = AType->getNumElements();
+ QualType ElementType = CGF.getContext().getCanonicalType(E->getType());
+ ElementType = CGF.getContext().getAsArrayType(ElementType)->getElementType();
+
+ // FIXME: were we intentionally ignoring address spaces and GC attributes?
+
+ for (uint64_t i = 0; i != NumArrayElements; ++i) {
+ llvm::Value *NextVal = Builder.CreateStructGEP(DestPtr, i, ".array");
+ LValue LV = CGF.MakeAddrLValue(NextVal, ElementType);
+ if (i < NumInitElements)
+ EmitInitializationToLValue(E->getInit(i), LV, ElementType);
+ else
+ EmitNullInitializationToLValue(LV, ElementType);
+ }
+ return;
+ }
+
+ assert(E->getType()->isRecordType() && "Only support structs/unions here!");
+
+ // Do struct initialization; this code just sets each individual member
+  // to the appropriate value. This makes bitfield support automatic;
+ // the disadvantage is that the generated code is more difficult for
+ // the optimizer, especially with bitfields.
+ unsigned NumInitElements = E->getNumInits();
+ RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
+
+ // If we're initializing the whole aggregate, just do it in place.
+ // FIXME: This is a hack around an AST bug (PR6537).
+ if (NumInitElements == 1 && E->getType() == E->getInit(0)->getType()) {
+ EmitInitializationToLValue(E->getInit(0),
+ CGF.MakeAddrLValue(DestPtr, E->getType()),
+ E->getType());
+ return;
+ }
+
+
+ if (E->getType()->isUnionType()) {
+ // Only initialize one field of a union. The field itself is
+ // specified by the initializer list.
+ if (!E->getInitializedFieldInUnion()) {
+ // Empty union; we have nothing to do.
+
+#ifndef NDEBUG
+      // Make sure that it's really an empty union and not a failure of
+ // semantic analysis.
+ for (RecordDecl::field_iterator Field = SD->field_begin(),
+ FieldEnd = SD->field_end();
+ Field != FieldEnd; ++Field)
+ assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
+#endif
+ return;
+ }
+
+ // FIXME: volatility
+ FieldDecl *Field = E->getInitializedFieldInUnion();
+ LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
+
+ if (NumInitElements) {
+ // Store the initializer into the field
+ EmitInitializationToLValue(E->getInit(0), FieldLoc, Field->getType());
+ } else {
+ // Default-initialize to null
+ EmitNullInitializationToLValue(FieldLoc, Field->getType());
+ }
+
+ return;
+ }
+
+ // Here we iterate over the fields; this makes it simpler to both
+ // default-initialize fields and skip over unnamed fields.
+ unsigned CurInitVal = 0;
+ for (RecordDecl::field_iterator Field = SD->field_begin(),
+ FieldEnd = SD->field_end();
+ Field != FieldEnd; ++Field) {
+ // We're done once we hit the flexible array member
+ if (Field->getType()->isIncompleteArrayType())
+ break;
+
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ // FIXME: volatility
+ LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, *Field, 0);
+    // We never generate write-barriers for initialized fields.
+ FieldLoc.setNonGC(true);
+ if (CurInitVal < NumInitElements) {
+ // Store the initializer into the field.
+ EmitInitializationToLValue(E->getInit(CurInitVal++), FieldLoc,
+ Field->getType());
+ } else {
+ // We're out of initializers; default-initialize to null
+ EmitNullInitializationToLValue(FieldLoc, Field->getType());
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Entry Points into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitAggExpr - Emit the computation of the specified expression of aggregate
+/// type. The result is computed into DestPtr. Note that if DestPtr is null,
+/// the value of the aggregate expression is not needed. If VolatileDest is
+/// true, DestPtr cannot be 0.
+//
+// FIXME: Take Qualifiers object.
+void CodeGenFunction::EmitAggExpr(const Expr *E, llvm::Value *DestPtr,
+ bool VolatileDest, bool IgnoreResult,
+ bool IsInitializer,
+ bool RequiresGCollection) {
+ assert(E && hasAggregateLLVMType(E->getType()) &&
+ "Invalid aggregate expression to emit");
+ assert((DestPtr != 0 || VolatileDest == false) &&
+ "volatile aggregate can't be 0");
+
+ AggExprEmitter(*this, DestPtr, VolatileDest, IgnoreResult, IsInitializer,
+ RequiresGCollection)
+ .Visit(const_cast<Expr*>(E));
+}
+
+LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
+ assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
+ llvm::Value *Temp = CreateMemTemp(E->getType());
+ LValue LV = MakeAddrLValue(Temp, E->getType());
+ EmitAggExpr(E, Temp, LV.isVolatileQualified());
+ return LV;
+}
+
+void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
+ llvm::Value *SrcPtr, QualType Ty,
+ bool isVolatile) {
+ assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
+
+ if (getContext().getLangOptions().CPlusPlus) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
+ assert((Record->hasTrivialCopyConstructor() ||
+ Record->hasTrivialCopyAssignment()) &&
+ "Trying to aggregate-copy a type without a trivial copy "
+ "constructor or assignment operator");
+ // Ignore empty classes in C++.
+ if (Record->isEmpty())
+ return;
+ }
+ }
+
+ // Aggregate assignment turns into llvm.memcpy. This is almost valid per
+ // C99 6.5.16.1p3, which states "If the value being stored in an object is
+ // read from another object that overlaps in any way the storage of the first
+ // object, then the overlap shall be exact and the two objects shall have
+ // qualified or unqualified versions of a compatible type."
+ //
+ // memcpy is not defined if the source and destination pointers are exactly
+ // equal, but other compilers do this optimization, and almost every memcpy
+ // implementation handles this case safely. If there is a libc that does not
+ // safely handle this, we can add a target hook.
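+ //
+ // For example, when the type has no Objective-C object members, a plain
+ // aggregate assignment such as
+ //
+ //   struct S { int a[4]; } x, y;
+ //   x = y;
+ //
+ // is lowered below to a single llvm.memcpy of sizeof(S) bytes, with the
+ // size and alignment taken from getTypeInfo.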
+
+ // Get size and alignment info for this aggregate.
+ std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
+
+ // FIXME: Handle variable sized types.
+
+ // FIXME: If we have a volatile struct, the optimizer can remove what might
+ // appear to be `extra' memory ops:
+ //
+ // volatile struct { int i; } a, b;
+ //
+ // int main() {
+ // a = b;
+ // a = b;
+ // }
+ //
+ // we need to use a different call here. We use isVolatile to indicate when
+ // either the source or the destination is volatile.
+
+ const llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
+ const llvm::Type *DBP =
+ llvm::Type::getInt8PtrTy(VMContext, DPT->getAddressSpace());
+ DestPtr = Builder.CreateBitCast(DestPtr, DBP, "tmp");
+
+ const llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
+ const llvm::Type *SBP =
+ llvm::Type::getInt8PtrTy(VMContext, SPT->getAddressSpace());
+ SrcPtr = Builder.CreateBitCast(SrcPtr, SBP, "tmp");
+
+ if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
+ RecordDecl *Record = RecordTy->getDecl();
+ if (Record->hasObjectMember()) {
+ unsigned long size = TypeInfo.first/8;
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
+ SizeVal);
+ return;
+ }
+ } else if (getContext().getAsArrayType(Ty)) {
+ QualType BaseType = getContext().getBaseElementType(Ty);
+ if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
+ if (RecordTy->getDecl()->hasObjectMember()) {
+ unsigned long size = TypeInfo.first/8;
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
+ SizeVal);
+ return;
+ }
+ }
+ }
+
+ Builder.CreateCall5(CGM.getMemCpyFn(DestPtr->getType(), SrcPtr->getType(),
+ IntPtrTy),
+ DestPtr, SrcPtr,
+ // TypeInfo.first describes size in bits.
+ llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8),
+ Builder.getInt32(TypeInfo.second/8),
+ Builder.getInt1(isVolatile));
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
new file mode 100644
index 0000000..9a98281
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
@@ -0,0 +1,1216 @@
+//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with code generation of C++ expressions
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
+#include "CGObjCRuntime.h"
+#include "llvm/Intrinsics.h"
+using namespace clang;
+using namespace CodeGen;
+
+RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
+ llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ llvm::Value *This,
+ llvm::Value *VTT,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ assert(MD->isInstance() &&
+ "Trying to emit a member call expr on a static method!");
+
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+
+ CallArgList Args;
+
+ // Push the this ptr.
+ Args.push_back(std::make_pair(RValue::get(This),
+ MD->getThisType(getContext())));
+
+ // If there is a VTT parameter, emit it.
+ if (VTT) {
+ QualType T = getContext().getPointerType(getContext().VoidPtrTy);
+ Args.push_back(std::make_pair(RValue::get(VTT), T));
+ }
+
+ // And the rest of the call args
+ EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
+
+ QualType ResultType = FPT->getResultType();
+ return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
+ FPT->getExtInfo()),
+ Callee, ReturnValue, Args, MD);
+}
+
+/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on the
+/// given expr can be devirtualized.
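+/// For example, given 'Derived D; D.Foo();' the static and dynamic types of
+/// the object are known to be the same, so the virtual call can be emitted
+/// as a direct call to Derived::Foo.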
+static bool canDevirtualizeMemberFunctionCalls(const Expr *Base) {
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ // This is a record decl. We know the type and can devirtualize it.
+ return VD->getType()->isRecordType();
+ }
+
+ return false;
+ }
+
+ // We can always devirtualize calls on temporary object expressions.
+ if (isa<CXXConstructExpr>(Base))
+ return true;
+
+ // And calls on bound temporaries.
+ if (isa<CXXBindTemporaryExpr>(Base))
+ return true;
+
+ // Check if this is a call expr that returns a record type.
+ if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
+ return CE->getCallReturnType()->isRecordType();
+
+ // We can't devirtualize the call.
+ return false;
+}
+
+RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
+ ReturnValueSlot ReturnValue) {
+ if (isa<BinaryOperator>(CE->getCallee()->IgnoreParens()))
+ return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
+
+ const MemberExpr *ME = cast<MemberExpr>(CE->getCallee()->IgnoreParens());
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
+
+ if (MD->isStatic()) {
+ // The method is static, emit it as we would a regular call.
+ llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
+ return EmitCall(getContext().getPointerType(MD->getType()), Callee,
+ ReturnValue, CE->arg_begin(), CE->arg_end());
+ }
+
+ // Compute the object pointer.
+ llvm::Value *This;
+ if (ME->isArrow())
+ This = EmitScalarExpr(ME->getBase());
+ else {
+ LValue BaseLV = EmitLValue(ME->getBase());
+ This = BaseLV.getAddress();
+ }
+
+ if (MD->isTrivial()) {
+ if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
+
+ assert(MD->isCopyAssignment() && "unknown trivial member function");
+ // We don't like to generate the trivial copy assignment operator when
+ // it isn't necessary; just produce the proper effect here.
+ llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
+ EmitAggregateCopy(This, RHS, CE->getType());
+ return RValue::get(This);
+ }
+
+ // Compute the function type we're calling.
+ const CGFunctionInfo &FInfo =
+ (isa<CXXDestructorDecl>(MD)
+ ? CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
+ Dtor_Complete)
+ : CGM.getTypes().getFunctionInfo(MD));
+
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const llvm::Type *Ty
+ = CGM.getTypes().GetFunctionType(FInfo, FPT->isVariadic());
+
+ // C++ [class.virtual]p12:
+ // Explicit qualification with the scope operator (5.1) suppresses the
+ // virtual call mechanism.
+ //
+ // We also don't emit a virtual call if the base expression has a record type
+ // because then we know what the type is.
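+ //
+ // For example, 'P->Base::Func()' always calls Base::Func directly even if
+ // Func is virtual, and a call on an object itself (rather than through a
+ // pointer or reference), such as 'Obj.Func()', is devirtualized because the
+ // dynamic type is known.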
+ bool UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
+ && !canDevirtualizeMemberFunctionCalls(ME->getBase());
+
+ llvm::Value *Callee;
+ if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
+ if (UseVirtualCall) {
+ Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
+ } else {
+ Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
+ }
+ } else if (UseVirtualCall) {
+ Callee = BuildVirtualCall(MD, This, Ty);
+ } else {
+ Callee = CGM.GetAddrOfFunction(MD, Ty);
+ }
+
+ return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
+ CE->arg_begin(), CE->arg_end());
+}
+
+RValue
+CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ const BinaryOperator *BO =
+ cast<BinaryOperator>(E->getCallee()->IgnoreParens());
+ const Expr *BaseExpr = BO->getLHS();
+ const Expr *MemFnExpr = BO->getRHS();
+
+ const MemberPointerType *MPT =
+ MemFnExpr->getType()->getAs<MemberPointerType>();
+
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->getAs<FunctionProtoType>();
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
+
+ // Get the member function pointer.
+ llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
+
+ // Emit the 'this' pointer.
+ llvm::Value *This;
+
+ if (BO->getOpcode() == BO_PtrMemI)
+ This = EmitScalarExpr(BaseExpr);
+ else
+ This = EmitLValue(BaseExpr).getAddress();
+
+ // Ask the ABI to load the callee. Note that This is modified.
+ llvm::Value *Callee =
+ CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);
+
+ CallArgList Args;
+
+ QualType ThisType =
+ getContext().getPointerType(getContext().getTagDeclType(RD));
+
+ // Push the this ptr.
+ Args.push_back(std::make_pair(RValue::get(This), ThisType));
+
+ // And the rest of the call args
+ EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
+ const FunctionType *BO_FPT = BO->getType()->getAs<FunctionProtoType>();
+ return EmitCall(CGM.getTypes().getFunctionInfo(Args, BO_FPT), Callee,
+ ReturnValue, Args);
+}
+
+RValue
+CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD,
+ ReturnValueSlot ReturnValue) {
+ assert(MD->isInstance() &&
+ "Trying to emit a member call expr on a static method!");
+ if (MD->isCopyAssignment()) {
+ const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
+ if (ClassDecl->hasTrivialCopyAssignment()) {
+ assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
+ "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
+ LValue LV = EmitLValue(E->getArg(0));
+ llvm::Value *This;
+ if (LV.isPropertyRef() || LV.isKVCRef()) {
+ llvm::Value *AggLoc = CreateMemTemp(E->getArg(1)->getType());
+ EmitAggExpr(E->getArg(1), AggLoc, false /*VolatileDest*/);
+ if (LV.isPropertyRef())
+ EmitObjCPropertySet(LV.getPropertyRefExpr(),
+ RValue::getAggregate(AggLoc,
+ false /*VolatileDest*/));
+ else
+ EmitObjCPropertySet(LV.getKVCRefExpr(),
+ RValue::getAggregate(AggLoc,
+ false /*VolatileDest*/));
+ return RValue::getAggregate(0, false);
+ }
+ else
+ This = LV.getAddress();
+
+ llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
+ QualType Ty = E->getType();
+ EmitAggregateCopy(This, Src, Ty);
+ return RValue::get(This);
+ }
+ }
+
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
+ LValue LV = EmitLValue(E->getArg(0));
+ llvm::Value *This;
+ if (LV.isPropertyRef() || LV.isKVCRef()) {
+ QualType QT = E->getArg(0)->getType();
+ RValue RV =
+ LV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(LV, QT)
+ : EmitLoadOfKVCRefLValue(LV, QT);
+ assert (!RV.isScalar() && "EmitCXXOperatorMemberCallExpr");
+ This = RV.getAggregateAddr();
+ }
+ else
+ This = LV.getAddress();
+
+ llvm::Value *Callee;
+ if (MD->isVirtual() && !canDevirtualizeMemberFunctionCalls(E->getArg(0)))
+ Callee = BuildVirtualCall(MD, This, Ty);
+ else
+ Callee = CGM.GetAddrOfFunction(MD, Ty);
+
+ return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
+ E->arg_begin() + 1, E->arg_end());
+}
+
+void
+CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
+ const CXXConstructExpr *E) {
+ assert(Dest && "Must have a destination!");
+ const CXXConstructorDecl *CD = E->getConstructor();
+
+ // If we require zero initialization before (or instead of) calling the
+ // constructor, as can be the case with a non-user-provided default
+ // constructor, emit the zero initialization now.
+ if (E->requiresZeroInitialization())
+ EmitNullInitialization(Dest, E->getType());
+
+ // If this is a call to a trivial default constructor, do nothing.
+ if (CD->isTrivial() && CD->isDefaultConstructor())
+ return;
+
+ // Code gen optimization to eliminate copy constructor and return
+ // its first argument instead, if in fact that argument is a temporary
+ // object.
+ if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
+ if (const Expr *Arg = E->getArg(0)->getTemporaryObject()) {
+ EmitAggExpr(Arg, Dest, false);
+ return;
+ }
+ }
+
+ const ConstantArrayType *Array
+ = getContext().getAsConstantArrayType(E->getType());
+ if (Array) {
+ QualType BaseElementTy = getContext().getBaseElementType(Array);
+ const llvm::Type *BasePtr = ConvertType(BaseElementTy);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr =
+ Builder.CreateBitCast(Dest, BasePtr);
+
+ EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
+ E->arg_begin(), E->arg_end());
+ }
+ else {
+ CXXCtorType Type =
+ (E->getConstructionKind() == CXXConstructExpr::CK_Complete)
+ ? Ctor_Complete : Ctor_Base;
+ bool ForVirtualBase =
+ E->getConstructionKind() == CXXConstructExpr::CK_VirtualBase;
+
+ // Call the constructor.
+ EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest,
+ E->arg_begin(), E->arg_end());
+ }
+}
+
+/// Check whether the given operator new[] is the global placement
+/// operator new[].
+static bool IsPlacementOperatorNewArray(ASTContext &Ctx,
+ const FunctionDecl *Fn) {
+ // Must be in global scope. Note that allocation functions can't be
+ // declared in namespaces.
+ if (!Fn->getDeclContext()->getRedeclContext()->isFileContext())
+ return false;
+
+ // Signature must be void *operator new[](size_t, void*).
+ // The size_t is common to all operator new[]s.
+ if (Fn->getNumParams() != 2)
+ return false;
+
+ CanQualType ParamType = Ctx.getCanonicalType(Fn->getParamDecl(1)->getType());
+ return (ParamType == Ctx.VoidPtrTy);
+}
+
+static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
+ const CXXNewExpr *E) {
+ if (!E->isArray())
+ return CharUnits::Zero();
+
+ // No cookie is required if the new operator being used is
+ // ::operator new[](size_t, void*).
+ const FunctionDecl *OperatorNew = E->getOperatorNew();
+ if (IsPlacementOperatorNewArray(CGF.getContext(), OperatorNew))
+ return CharUnits::Zero();
+
+ return CGF.CGM.getCXXABI().GetArrayCookieSize(E->getAllocatedType());
+}
+
+static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
+ CodeGenFunction &CGF,
+ const CXXNewExpr *E,
+ llvm::Value *&NumElements,
+ llvm::Value *&SizeWithoutCookie) {
+ QualType ElemType = E->getAllocatedType();
+
+ const llvm::IntegerType *SizeTy =
+ cast<llvm::IntegerType>(CGF.ConvertType(CGF.getContext().getSizeType()));
+
+ CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(ElemType);
+
+ if (!E->isArray()) {
+ SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
+ return SizeWithoutCookie;
+ }
+
+ // Figure out the cookie size.
+ CharUnits CookieSize = CalculateCookiePadding(CGF, E);
+
+ // Emit the array size expression.
+ // We multiply the sizes of all dimensions to get NumElements;
+ // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
+ NumElements = CGF.EmitScalarExpr(E->getArraySize());
+ assert(NumElements->getType() == SizeTy && "element count not a size_t");
+
+ uint64_t ArraySizeMultiplier = 1;
+ while (const ConstantArrayType *CAT
+ = CGF.getContext().getAsConstantArrayType(ElemType)) {
+ ElemType = CAT->getElementType();
+ ArraySizeMultiplier *= CAT->getSize().getZExtValue();
+ }
+
+ llvm::Value *Size;
+
+ // If someone is doing 'new int[42]' there is no need to do a dynamic check.
+ // Don't bloat the -O0 code.
+ if (llvm::ConstantInt *NumElementsC =
+ dyn_cast<llvm::ConstantInt>(NumElements)) {
+ llvm::APInt NEC = NumElementsC->getValue();
+ unsigned SizeWidth = NEC.getBitWidth();
+
+ // Determine if there is an overflow here by doing an extended multiply.
+ NEC.zext(SizeWidth*2);
+ llvm::APInt SC(SizeWidth*2, TypeSize.getQuantity());
+ SC *= NEC;
+
+ if (!CookieSize.isZero()) {
+ // Save the current size without a cookie. We don't care if an
+ // overflow's already happened because SizeWithoutCookie isn't
+ // used if the allocator returns null or throws, as it should
+ // always do on an overflow.
+ llvm::APInt SWC = SC;
+ SWC.trunc(SizeWidth);
+ SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, SWC);
+
+ // Add the cookie size.
+ SC += llvm::APInt(SizeWidth*2, CookieSize.getQuantity());
+ }
+
+ if (SC.countLeadingZeros() >= SizeWidth) {
+ SC.trunc(SizeWidth);
+ Size = llvm::ConstantInt::get(SizeTy, SC);
+ } else {
+ // On overflow, produce a -1 so operator new throws.
+ Size = llvm::Constant::getAllOnesValue(SizeTy);
+ }
+
+ // Scale NumElements while we're at it.
+ uint64_t N = NEC.getZExtValue() * ArraySizeMultiplier;
+ NumElements = llvm::ConstantInt::get(SizeTy, N);
+
+ // Otherwise, we don't need to do an overflow-checked multiplication if
+ // we're multiplying by one.
+ } else if (TypeSize.isOne()) {
+ assert(ArraySizeMultiplier == 1);
+
+ Size = NumElements;
+
+ // If we need a cookie, add its size in with an overflow check.
+ // This is maybe a little paranoid.
+ if (!CookieSize.isZero()) {
+ SizeWithoutCookie = Size;
+
+ llvm::Value *CookieSizeV
+ = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
+
+ const llvm::Type *Types[] = { SizeTy };
+ llvm::Value *UAddF
+ = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
+ llvm::Value *AddRes
+ = CGF.Builder.CreateCall2(UAddF, Size, CookieSizeV);
+
+ Size = CGF.Builder.CreateExtractValue(AddRes, 0);
+ llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
+ Size = CGF.Builder.CreateSelect(DidOverflow,
+ llvm::ConstantInt::get(SizeTy, -1),
+ Size);
+ }
+
+ // Otherwise use the int.umul.with.overflow intrinsic.
+ } else {
+ llvm::Value *OutermostElementSize
+ = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
+
+ llvm::Value *NumOutermostElements = NumElements;
+
+ // Scale NumElements by the array size multiplier. This might
+ // overflow, but only if the multiplication below also overflows,
+ // in which case this multiplication isn't used.
+ if (ArraySizeMultiplier != 1)
+ NumElements = CGF.Builder.CreateMul(NumElements,
+ llvm::ConstantInt::get(SizeTy, ArraySizeMultiplier));
+
+ // The requested size of the outermost array is non-constant.
+ // Multiply that by the static size of the elements of that array;
+ // on unsigned overflow, set the size to -1 to trigger an
+ // exception from the allocation routine. This is sufficient to
+ // prevent buffer overruns from the allocator returning a
+ // seemingly valid pointer to insufficient space. This idea comes
+ // originally from MSVC, and GCC has an open bug requesting
+ // similar behavior:
+ // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19351
+ //
+ // This will not be sufficient for C++0x, which requires a
+ // specific exception class (std::bad_array_new_length).
+ // That will require ABI support that has not yet been specified.
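+ //
+ // For example (assuming a 64-bit size_t), 'new T[n]' with sizeof(T) == 24
+ // becomes a call to @llvm.umul.with.overflow.i64(n, 24); if the overflow
+ // bit is set, the size is replaced with -1 (SIZE_MAX) by the select below,
+ // which the allocation function is expected to reject.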
+ const llvm::Type *Types[] = { SizeTy };
+ llvm::Value *UMulF
+ = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, Types, 1);
+ llvm::Value *MulRes = CGF.Builder.CreateCall2(UMulF, NumOutermostElements,
+ OutermostElementSize);
+
+ // The overflow bit.
+ llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(MulRes, 1);
+
+ // The result of the multiplication.
+ Size = CGF.Builder.CreateExtractValue(MulRes, 0);
+
+ // If we have a cookie, we need to add that size in, too.
+ if (!CookieSize.isZero()) {
+ SizeWithoutCookie = Size;
+
+ llvm::Value *CookieSizeV
+ = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
+ llvm::Value *UAddF
+ = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
+ llvm::Value *AddRes
+ = CGF.Builder.CreateCall2(UAddF, SizeWithoutCookie, CookieSizeV);
+
+ Size = CGF.Builder.CreateExtractValue(AddRes, 0);
+
+ llvm::Value *AddDidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
+ DidOverflow = CGF.Builder.CreateAnd(DidOverflow, AddDidOverflow);
+ }
+
+ Size = CGF.Builder.CreateSelect(DidOverflow,
+ llvm::ConstantInt::get(SizeTy, -1),
+ Size);
+ }
+
+ if (CookieSize.isZero())
+ SizeWithoutCookie = Size;
+ else
+ assert(SizeWithoutCookie && "didn't set SizeWithoutCookie?");
+
+ return Size;
+}
+
+static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
+ llvm::Value *NewPtr) {
+ assert(E->getNumConstructorArgs() == 1 &&
+ "Can only have one argument to initializer of POD type.");
+
+ const Expr *Init = E->getConstructorArg(0);
+ QualType AllocType = E->getAllocatedType();
+
+ unsigned Alignment =
+ CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
+ if (!CGF.hasAggregateLLVMType(AllocType))
+ CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
+ AllocType.isVolatileQualified(), Alignment,
+ AllocType);
+ else if (AllocType->isAnyComplexType())
+ CGF.EmitComplexExprIntoAddr(Init, NewPtr,
+ AllocType.isVolatileQualified());
+ else
+ CGF.EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified());
+}
+
+void
+CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements) {
+ // We have a POD type.
+ if (E->getNumConstructorArgs() == 0)
+ return;
+
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+
+ // Create a temporary for the loop index and initialize it with 0.
+ llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
+ llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
+ Builder.CreateStore(Zero, IndexPtr);
+
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
+
+ EmitBlock(CondBlock);
+
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+ // Generate: if (loop-index < number-of-elements) fall to the loop body,
+ // otherwise, go to the block after the for-loop.
+ llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
+ llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
+ // If the condition is true, execute the body.
+ Builder.CreateCondBr(IsLess, ForBody, AfterFor);
+
+ EmitBlock(ForBody);
+
+ llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
+ // Inside the loop body, emit the constructor call on the array element.
+ Counter = Builder.CreateLoad(IndexPtr);
+ llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
+ "arrayidx");
+ StoreAnyExprIntoOneUnit(*this, E, Address);
+
+ EmitBlock(ContinueBlock);
+
+ // Emit the increment of the loop counter.
+ llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
+ Counter = Builder.CreateLoad(IndexPtr);
+ NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
+ Builder.CreateStore(NextVal, IndexPtr);
+
+ // Finally, branch back up to the condition for the next iteration.
+ EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ EmitBlock(AfterFor, true);
+}
+
+static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
+ llvm::Value *NewPtr, llvm::Value *Size) {
+ llvm::LLVMContext &VMContext = CGF.CGM.getLLVMContext();
+ const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
+ if (NewPtr->getType() != BP)
+ NewPtr = CGF.Builder.CreateBitCast(NewPtr, BP, "tmp");
+
+ CGF.Builder.CreateCall5(CGF.CGM.getMemSetFn(BP, CGF.IntPtrTy), NewPtr,
+ llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
+ Size,
+ llvm::ConstantInt::get(CGF.Int32Ty,
+ CGF.getContext().getTypeAlign(T)/8),
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
+ 0));
+}
+
+static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ llvm::Value *AllocSizeWithoutCookie) {
+ if (E->isArray()) {
+ if (CXXConstructorDecl *Ctor = E->getConstructor()) {
+ bool RequiresZeroInitialization = false;
+ if (Ctor->getParent()->hasTrivialConstructor()) {
+ // If the new expression did not specify value-initialization, then there
+ // is no initialization.
+ if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
+ return;
+
+ if (CGF.CGM.getTypes().isZeroInitializable(E->getAllocatedType())) {
+ // Optimization: since zero initialization will just set the memory
+ // to all zeroes, generate a single memset to do it in one shot.
+ EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
+ AllocSizeWithoutCookie);
+ return;
+ }
+
+ RequiresZeroInitialization = true;
+ }
+
+ CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
+ E->constructor_arg_begin(),
+ E->constructor_arg_end(),
+ RequiresZeroInitialization);
+ return;
+ } else if (E->getNumConstructorArgs() == 1 &&
+ isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
+ // Optimization: since zero initialization will just set the memory
+ // to all zeroes, generate a single memset to do it in one shot.
+ EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
+ AllocSizeWithoutCookie);
+ return;
+ } else {
+ CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
+ return;
+ }
+ }
+
+ if (CXXConstructorDecl *Ctor = E->getConstructor()) {
+ // Per C++ [expr.new]p15, if we have an initializer, then we're performing
+ // direct initialization. C++ [dcl.init]p5 requires that we
+ // zero-initialize storage if there are no user-declared constructors.
+ if (E->hasInitializer() &&
+ !Ctor->getParent()->hasUserDeclaredConstructor() &&
+ !Ctor->getParent()->isEmpty())
+ CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());
+
+ CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
+ NewPtr, E->constructor_arg_begin(),
+ E->constructor_arg_end());
+
+ return;
+ }
+ // We have a POD type.
+ if (E->getNumConstructorArgs() == 0)
+ return;
+
+ StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
+}
+
+llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
+ QualType AllocType = E->getAllocatedType();
+ if (AllocType->isArrayType())
+ while (const ArrayType *AType = getContext().getAsArrayType(AllocType))
+ AllocType = AType->getElementType();
+
+ FunctionDecl *NewFD = E->getOperatorNew();
+ const FunctionProtoType *NewFTy = NewFD->getType()->getAs<FunctionProtoType>();
+
+ CallArgList NewArgs;
+
+ // The allocation size is the first argument.
+ QualType SizeTy = getContext().getSizeType();
+
+ llvm::Value *NumElements = 0;
+ llvm::Value *AllocSizeWithoutCookie = 0;
+ llvm::Value *AllocSize = EmitCXXNewAllocSize(getContext(),
+ *this, E, NumElements,
+ AllocSizeWithoutCookie);
+
+ NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));
+
+ // Emit the rest of the arguments.
+ // FIXME: Ideally, this should just use EmitCallArgs.
+ CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();
+
+ // First, use the types from the function type.
+ // We start at 1 here because the first argument (the allocation size)
+ // has already been emitted.
+ for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
+ QualType ArgType = NewFTy->getArgType(i);
+
+ assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
+ getTypePtr() ==
+ getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
+ "type mismatch in call argument!");
+
+ NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
+ ArgType));
+
+ }
+
+ // Either we've emitted all the call args, or we have a call to a
+ // variadic function.
+ assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
+ "Extra arguments in non-variadic function!");
+
+ // If we still have any arguments, emit them using the type of the argument.
+ for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
+ NewArg != NewArgEnd; ++NewArg) {
+ QualType ArgType = NewArg->getType();
+ NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
+ ArgType));
+ }
+
+ // Emit the call to new.
+ RValue RV =
+ EmitCall(CGM.getTypes().getFunctionInfo(NewArgs, NewFTy),
+ CGM.GetAddrOfFunction(NewFD), ReturnValueSlot(), NewArgs, NewFD);
+
+ // If an allocation function is declared with an empty exception specification,
+ // it returns null to indicate failure to allocate storage. [expr.new]p13.
+ // (We don't need to check for null when there's no new initializer and
+ // we're allocating a POD type).
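+ //
+ // For example, the nothrow form 'operator new(size_t, const std::nothrow_t&)
+ // throw()' may return null, so the initializer is only run on the not-null
+ // branch emitted below.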
+ bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
+ !(AllocType->isPODType() && !E->hasInitializer());
+
+ llvm::BasicBlock *NullCheckSource = 0;
+ llvm::BasicBlock *NewNotNull = 0;
+ llvm::BasicBlock *NewEnd = 0;
+
+ llvm::Value *NewPtr = RV.getScalarVal();
+ unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
+
+ if (NullCheckResult) {
+ NullCheckSource = Builder.GetInsertBlock();
+ NewNotNull = createBasicBlock("new.notnull");
+ NewEnd = createBasicBlock("new.end");
+
+ llvm::Value *IsNull = Builder.CreateIsNull(NewPtr, "new.isnull");
+ Builder.CreateCondBr(IsNull, NewEnd, NewNotNull);
+ EmitBlock(NewNotNull);
+ }
+
+ assert((AllocSize == AllocSizeWithoutCookie) ==
+ CalculateCookiePadding(*this, E).isZero());
+ if (AllocSize != AllocSizeWithoutCookie) {
+ assert(E->isArray());
+ NewPtr = CGM.getCXXABI().InitializeArrayCookie(*this, NewPtr, NumElements,
+ AllocType);
+ }
+
+ const llvm::Type *ElementPtrTy
+ = ConvertTypeForMem(AllocType)->getPointerTo(AS);
+ NewPtr = Builder.CreateBitCast(NewPtr, ElementPtrTy);
+ if (E->isArray()) {
+ EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
+
+ // NewPtr is a pointer to the base element type. If we're
+ // allocating an array of arrays, we'll need to cast back to the
+ // array pointer type.
+ const llvm::Type *ResultTy = ConvertTypeForMem(E->getType());
+ if (NewPtr->getType() != ResultTy)
+ NewPtr = Builder.CreateBitCast(NewPtr, ResultTy);
+ } else {
+ EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
+ }
+
+ if (NullCheckResult) {
+ Builder.CreateBr(NewEnd);
+ llvm::BasicBlock *NotNullSource = Builder.GetInsertBlock();
+ EmitBlock(NewEnd);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
+ PHI->reserveOperandSpace(2);
+ PHI->addIncoming(NewPtr, NotNullSource);
+ PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()),
+ NullCheckSource);
+
+ NewPtr = PHI;
+ }
+
+ return NewPtr;
+}
+
+void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
+ llvm::Value *Ptr,
+ QualType DeleteTy) {
+ assert(DeleteFD->getOverloadedOperator() == OO_Delete);
+
+ const FunctionProtoType *DeleteFTy =
+ DeleteFD->getType()->getAs<FunctionProtoType>();
+
+ CallArgList DeleteArgs;
+
+ // Check if we need to pass the size to the delete operator.
+ llvm::Value *Size = 0;
+ QualType SizeTy;
+ if (DeleteFTy->getNumArgs() == 2) {
+ SizeTy = DeleteFTy->getArgType(1);
+ CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
+ Size = llvm::ConstantInt::get(ConvertType(SizeTy),
+ DeleteTypeSize.getQuantity());
+ }
+
+ QualType ArgTy = DeleteFTy->getArgType(0);
+ llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
+ DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy));
+
+ if (Size)
+ DeleteArgs.push_back(std::make_pair(RValue::get(Size), SizeTy));
+
+ // Emit the call to delete.
+ EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
+ CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
+ DeleteArgs, DeleteFD);
+}
+
+namespace {
+ /// Calls the given 'operator delete' on a single object.
+ struct CallObjectDelete : EHScopeStack::Cleanup {
+ llvm::Value *Ptr;
+ const FunctionDecl *OperatorDelete;
+ QualType ElementType;
+
+ CallObjectDelete(llvm::Value *Ptr,
+ const FunctionDecl *OperatorDelete,
+ QualType ElementType)
+ : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
+ }
+ };
+}
+
+/// Emit the code for deleting a single object.
+static void EmitObjectDelete(CodeGenFunction &CGF,
+ const FunctionDecl *OperatorDelete,
+ llvm::Value *Ptr,
+ QualType ElementType) {
+ // Find the destructor for the type, if applicable. If the
+ // destructor is virtual, we'll just emit the vcall and return.
+ const CXXDestructorDecl *Dtor = 0;
+ if (const RecordType *RT = ElementType->getAs<RecordType>()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD->hasTrivialDestructor()) {
+ Dtor = RD->getDestructor();
+
+ if (Dtor->isVirtual()) {
+ const llvm::Type *Ty =
+ CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
+ Dtor_Complete),
+ /*isVariadic=*/false);
+
+ llvm::Value *Callee
+ = CGF.BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
+ CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
+ 0, 0);
+
+ // The dtor took care of deleting the object.
+ return;
+ }
+ }
+ }
+
+ // Make sure that we call delete even if the dtor throws.
+ CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
+ Ptr, OperatorDelete, ElementType);
+
+ if (Dtor)
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false, Ptr);
+
+ CGF.PopCleanupBlock();
+}
+
+namespace {
+ /// Calls the given 'operator delete' on an array of objects.
+ struct CallArrayDelete : EHScopeStack::Cleanup {
+ llvm::Value *Ptr;
+ const FunctionDecl *OperatorDelete;
+ llvm::Value *NumElements;
+ QualType ElementType;
+ CharUnits CookieSize;
+
+ CallArrayDelete(llvm::Value *Ptr,
+ const FunctionDecl *OperatorDelete,
+ llvm::Value *NumElements,
+ QualType ElementType,
+ CharUnits CookieSize)
+ : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
+ ElementType(ElementType), CookieSize(CookieSize) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ const FunctionProtoType *DeleteFTy =
+ OperatorDelete->getType()->getAs<FunctionProtoType>();
+ assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
+
+ CallArgList Args;
+
+ // Pass the pointer as the first argument.
+ QualType VoidPtrTy = DeleteFTy->getArgType(0);
+ llvm::Value *DeletePtr
+ = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
+ Args.push_back(std::make_pair(RValue::get(DeletePtr), VoidPtrTy));
+
+ // Pass the original requested size as the second argument.
+ if (DeleteFTy->getNumArgs() == 2) {
+ QualType size_t = DeleteFTy->getArgType(1);
+ const llvm::IntegerType *SizeTy
+ = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
+
+ CharUnits ElementTypeSize =
+ CGF.CGM.getContext().getTypeSizeInChars(ElementType);
+
+ // The size of an element, multiplied by the number of elements.
+ llvm::Value *Size
+ = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
+ Size = CGF.Builder.CreateMul(Size, NumElements);
+
+ // Plus the size of the cookie if applicable.
+ if (!CookieSize.isZero()) {
+ llvm::Value *CookieSizeV
+ = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
+ Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
+ }
+
+ Args.push_back(std::make_pair(RValue::get(Size), size_t));
+ }
+
+ // Emit the call to delete.
+ CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
+ CGF.CGM.GetAddrOfFunction(OperatorDelete),
+ ReturnValueSlot(), Args, OperatorDelete);
+ }
+ };
+}
+
+/// Emit the code for deleting an array of objects.
+static void EmitArrayDelete(CodeGenFunction &CGF,
+ const FunctionDecl *OperatorDelete,
+ llvm::Value *Ptr,
+ QualType ElementType) {
+ llvm::Value *NumElements = 0;
+ llvm::Value *AllocatedPtr = 0;
+ CharUnits CookieSize;
+ CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, ElementType,
+ NumElements, AllocatedPtr, CookieSize);
+
+ assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");
+
+ // Make sure that we call delete even if one of the dtors throws.
+ CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
+ AllocatedPtr, OperatorDelete,
+ NumElements, ElementType,
+ CookieSize);
+
+ if (const CXXRecordDecl *RD = ElementType->getAsCXXRecordDecl()) {
+ if (!RD->hasTrivialDestructor()) {
+ assert(NumElements && "ReadArrayCookie didn't find element count"
+ " for a class with destructor");
+ CGF.EmitCXXAggrDestructorCall(RD->getDestructor(), NumElements, Ptr);
+ }
+ }
+
+ CGF.PopCleanupBlock();
+}
+
+void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
+
+ // Get at the argument before we performed the implicit conversion
+ // to void*.
+ const Expr *Arg = E->getArgument();
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
+ if (ICE->getCastKind() != CK_UserDefinedConversion &&
+ ICE->getType()->isVoidPointerType())
+ Arg = ICE->getSubExpr();
+ else
+ break;
+ }
+
+ llvm::Value *Ptr = EmitScalarExpr(Arg);
+
+ // Null check the pointer.
+ llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
+ llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
+
+ llvm::Value *IsNull =
+ Builder.CreateICmpEQ(Ptr, llvm::Constant::getNullValue(Ptr->getType()),
+ "isnull");
+
+ Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
+ EmitBlock(DeleteNotNull);
+
+ // We might be deleting a pointer to an array. If so, GEP down to the
+ // first non-array element.
+ // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
+ QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
+ if (DeleteTy->isConstantArrayType()) {
+ llvm::Value *Zero = Builder.getInt32(0);
+ llvm::SmallVector<llvm::Value*,8> GEP;
+
+ GEP.push_back(Zero); // point at the outermost array
+
+ // For each layer of array type we're pointing at:
+ while (const ConstantArrayType *Arr
+ = getContext().getAsConstantArrayType(DeleteTy)) {
+ // 1. Unpeel the array type.
+ DeleteTy = Arr->getElementType();
+
+ // 2. GEP to the first element of the array.
+ GEP.push_back(Zero);
+ }
+
+ Ptr = Builder.CreateInBoundsGEP(Ptr, GEP.begin(), GEP.end(), "del.first");
+ }
+
+ assert(ConvertTypeForMem(DeleteTy) ==
+ cast<llvm::PointerType>(Ptr->getType())->getElementType());
+
+ if (E->isArrayForm()) {
+ EmitArrayDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
+ } else {
+ EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
+ }
+
+ EmitBlock(DeleteEnd);
+}
+
+llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
+ QualType Ty = E->getType();
+ const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();
+
+ if (E->isTypeOperand()) {
+ llvm::Constant *TypeInfo =
+ CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
+ return Builder.CreateBitCast(TypeInfo, LTy);
+ }
+
+ Expr *subE = E->getExprOperand();
+ Ty = subE->getType();
+ CanQualType CanTy = CGM.getContext().getCanonicalType(Ty);
+ Ty = CanTy.getUnqualifiedType().getNonReferenceType();
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->isPolymorphic()) {
+ // FIXME: if subE is an lvalue do
+ LValue Obj = EmitLValue(subE);
+ llvm::Value *This = Obj.getAddress();
+ LTy = LTy->getPointerTo()->getPointerTo();
+ llvm::Value *V = Builder.CreateBitCast(This, LTy);
+ // We need to do a zero check for *p, unless it has NonNullAttr.
+ // FIXME: PointerType->hasAttr<NonNullAttr>()
+ bool CanBeZero = false;
+ if (UnaryOperator *UO = dyn_cast<UnaryOperator>(subE->IgnoreParens()))
+ if (UO->getOpcode() == UO_Deref)
+ CanBeZero = true;
+ if (CanBeZero) {
+ llvm::BasicBlock *NonZeroBlock = createBasicBlock();
+ llvm::BasicBlock *ZeroBlock = createBasicBlock();
+
+ llvm::Value *Zero = llvm::Constant::getNullValue(LTy);
+ Builder.CreateCondBr(Builder.CreateICmpNE(V, Zero),
+ NonZeroBlock, ZeroBlock);
+ EmitBlock(ZeroBlock);
+ /// Call __cxa_bad_typeid
+ const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
+ const llvm::FunctionType *FTy;
+ FTy = llvm::FunctionType::get(ResultType, false);
+ llvm::Value *F = CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
+ Builder.CreateCall(F)->setDoesNotReturn();
+ Builder.CreateUnreachable();
+ EmitBlock(NonZeroBlock);
+ }
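+ // In the Itanium C++ ABI the std::type_info pointer is stored in the slot
+ // immediately before the vtable's address point, so load the vtable pointer
+ // and then index it at -1.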
+ V = Builder.CreateLoad(V, "vtable");
+ V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL);
+ V = Builder.CreateLoad(V);
+ return V;
+ }
+ }
+ return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(Ty), LTy);
+}
+
+llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
+ const CXXDynamicCastExpr *DCE) {
+ QualType SrcTy = DCE->getSubExpr()->getType();
+ QualType DestTy = DCE->getTypeAsWritten();
+ QualType InnerType = DestTy->getPointeeType();
+
+ const llvm::Type *LTy = ConvertType(DCE->getType());
+
+ bool CanBeZero = false;
+ bool ToVoid = false;
+ bool ThrowOnBad = false;
+ if (DestTy->isPointerType()) {
+ // FIXME: if PointerType->hasAttr<NonNullAttr>(), we don't set this
+ CanBeZero = true;
+ if (InnerType->isVoidType())
+ ToVoid = true;
+ } else {
+ LTy = LTy->getPointerTo();
+
+ // FIXME: What if exceptions are disabled?
+ ThrowOnBad = true;
+ }
+
+ if (SrcTy->isPointerType() || SrcTy->isReferenceType())
+ SrcTy = SrcTy->getPointeeType();
+ SrcTy = SrcTy.getUnqualifiedType();
+
+ if (DestTy->isPointerType() || DestTy->isReferenceType())
+ DestTy = DestTy->getPointeeType();
+ DestTy = DestTy.getUnqualifiedType();
+
+ llvm::BasicBlock *ContBlock = createBasicBlock();
+ llvm::BasicBlock *NullBlock = 0;
+ llvm::BasicBlock *NonZeroBlock = 0;
+ if (CanBeZero) {
+ NonZeroBlock = createBasicBlock();
+ NullBlock = createBasicBlock();
+ Builder.CreateCondBr(Builder.CreateIsNotNull(V), NonZeroBlock, NullBlock);
+ EmitBlock(NonZeroBlock);
+ }
+
+ llvm::BasicBlock *BadCastBlock = 0;
+
+ const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());
+
+ // See if this is a dynamic_cast(void*)
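+ // In that case no runtime call is needed: the Itanium ABI stores the
+ // "offset to top" at vtable slot -2, so adding that offset to the object
+ // pointer yields the address of the most-derived object.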
+ if (ToVoid) {
+ llvm::Value *This = V;
+ V = Builder.CreateBitCast(This, PtrDiffTy->getPointerTo()->getPointerTo());
+ V = Builder.CreateLoad(V, "vtable");
+ V = Builder.CreateConstInBoundsGEP1_64(V, -2ULL);
+ V = Builder.CreateLoad(V, "offset to top");
+ This = Builder.CreateBitCast(This, llvm::Type::getInt8PtrTy(VMContext));
+ V = Builder.CreateInBoundsGEP(This, V);
+ V = Builder.CreateBitCast(V, LTy);
+ } else {
+ /// Call __dynamic_cast
+ const llvm::Type *ResultType = llvm::Type::getInt8PtrTy(VMContext);
+ const llvm::FunctionType *FTy;
+ std::vector<const llvm::Type*> ArgTys;
+ const llvm::Type *PtrToInt8Ty
+ = llvm::Type::getInt8Ty(VMContext)->getPointerTo();
+ ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(PtrDiffTy);
+ FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
+
+ // FIXME: Calculate better hint.
+ llvm::Value *hint = llvm::ConstantInt::get(PtrDiffTy, -1ULL);
+
+ assert(SrcTy->isRecordType() && "Src type must be record type!");
+ assert(DestTy->isRecordType() && "Dest type must be record type!");
+
+ llvm::Value *SrcArg
+ = CGM.GetAddrOfRTTIDescriptor(SrcTy.getUnqualifiedType());
+ llvm::Value *DestArg
+ = CGM.GetAddrOfRTTIDescriptor(DestTy.getUnqualifiedType());
+
+ V = Builder.CreateBitCast(V, PtrToInt8Ty);
+ V = Builder.CreateCall4(CGM.CreateRuntimeFunction(FTy, "__dynamic_cast"),
+ V, SrcArg, DestArg, hint);
+ V = Builder.CreateBitCast(V, LTy);
+
+ if (ThrowOnBad) {
+ BadCastBlock = createBasicBlock();
+ Builder.CreateCondBr(Builder.CreateIsNotNull(V), ContBlock, BadCastBlock);
+ EmitBlock(BadCastBlock);
+ /// Invoke __cxa_bad_cast
+ ResultType = llvm::Type::getVoidTy(VMContext);
+ const llvm::FunctionType *FBadTy;
+ FBadTy = llvm::FunctionType::get(ResultType, false);
+ llvm::Value *F = CGM.CreateRuntimeFunction(FBadTy, "__cxa_bad_cast");
+ if (llvm::BasicBlock *InvokeDest = getInvokeDest()) {
+ llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
+ Builder.CreateInvoke(F, Cont, InvokeDest)->setDoesNotReturn();
+ EmitBlock(Cont);
+ } else {
+ // FIXME: Does this ever make sense?
+ Builder.CreateCall(F)->setDoesNotReturn();
+ }
+ Builder.CreateUnreachable();
+ }
+ }
+
+ if (CanBeZero) {
+ Builder.CreateBr(ContBlock);
+ EmitBlock(NullBlock);
+ Builder.CreateBr(ContBlock);
+ }
+ EmitBlock(ContBlock);
+ if (CanBeZero) {
+ llvm::PHINode *PHI = Builder.CreatePHI(LTy);
+ PHI->reserveOperandSpace(2);
+ PHI->addIncoming(V, NonZeroBlock);
+ PHI->addIncoming(llvm::Constant::getNullValue(LTy), NullBlock);
+ V = PHI;
+ }
+
+ return V;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
new file mode 100644
index 0000000..79e9dd4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
@@ -0,0 +1,751 @@
+//===--- CGExprComplex.cpp - Emit LLVM Code for Complex Exprs -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes with complex types as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Complex Expression Emitter
+//===----------------------------------------------------------------------===//
+
+typedef CodeGenFunction::ComplexPairTy ComplexPairTy;
+
+namespace {
+class ComplexExprEmitter
+ : public StmtVisitor<ComplexExprEmitter, ComplexPairTy> {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+ // True if we should ignore the value of a
+ bool IgnoreReal;
+ bool IgnoreImag;
+ // True if we should ignore the value of a=b
+ bool IgnoreRealAssign;
+ bool IgnoreImagAssign;
+public:
+ ComplexExprEmitter(CodeGenFunction &cgf, bool ir=false, bool ii=false,
+ bool irn=false, bool iin=false)
+ : CGF(cgf), Builder(CGF.Builder), IgnoreReal(ir), IgnoreImag(ii),
+ IgnoreRealAssign(irn), IgnoreImagAssign(iin) {
+ }
+
+
+ //===--------------------------------------------------------------------===//
+ // Utilities
+ //===--------------------------------------------------------------------===//
+
+ bool TestAndClearIgnoreReal() {
+ bool I = IgnoreReal;
+ IgnoreReal = false;
+ return I;
+ }
+ bool TestAndClearIgnoreImag() {
+ bool I = IgnoreImag;
+ IgnoreImag = false;
+ return I;
+ }
+ bool TestAndClearIgnoreRealAssign() {
+ bool I = IgnoreRealAssign;
+ IgnoreRealAssign = false;
+ return I;
+ }
+ bool TestAndClearIgnoreImagAssign() {
+ bool I = IgnoreImagAssign;
+ IgnoreImagAssign = false;
+ return I;
+ }
+
+ /// EmitLoadOfLValue - Given an expression with complex type that represents
+ /// an l-value, this method emits the address of the l-value, then loads
+ /// and returns the result.
+ ComplexPairTy EmitLoadOfLValue(const Expr *E) {
+ LValue LV = CGF.EmitLValue(E);
+ if (LV.isSimple())
+ return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
+
+ if (LV.isPropertyRef())
+ return CGF.EmitObjCPropertyGet(LV.getPropertyRefExpr()).getComplexVal();
+
+ assert(LV.isKVCRef() && "Unknown LValue type!");
+ return CGF.EmitObjCPropertyGet(LV.getKVCRefExpr()).getComplexVal();
+ }
+
+ /// EmitLoadOfComplex - Given a pointer to a complex value, emit code to load
+ /// the real and imaginary pieces.
+ ComplexPairTy EmitLoadOfComplex(llvm::Value *SrcPtr, bool isVolatile);
+
+ /// EmitStoreOfComplex - Store the specified real/imag parts into the
+ /// specified value pointer.
+ void EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *ResPtr, bool isVol);
+
+ /// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
+ ComplexPairTy EmitComplexToComplexCast(ComplexPairTy Val, QualType SrcType,
+ QualType DestType);
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ ComplexPairTy VisitStmt(Stmt *S) {
+ S->dump(CGF.getContext().getSourceManager());
+ assert(0 && "Stmt can't have complex result type!");
+ return ComplexPairTy();
+ }
+ ComplexPairTy VisitExpr(Expr *S);
+ ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());}
+ ComplexPairTy VisitImaginaryLiteral(const ImaginaryLiteral *IL);
+
+ // l-values.
+ ComplexPairTy VisitDeclRefExpr(const Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ ComplexPairTy VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ ComplexPairTy VisitObjCImplicitSetterGetterRefExpr(
+ ObjCImplicitSetterGetterRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ ComplexPairTy VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ return CGF.EmitObjCMessageExpr(E).getComplexVal();
+ }
+ ComplexPairTy VisitArraySubscriptExpr(Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitMemberExpr(const Expr *E) { return EmitLoadOfLValue(E); }
+
+ // FIXME: CompoundLiteralExpr
+
+ ComplexPairTy EmitCast(CastExpr::CastKind CK, Expr *Op, QualType DestTy);
+ ComplexPairTy VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ // Unlike for scalars, we don't have to worry about function->ptr demotion
+ // here.
+ return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType());
+ }
+ ComplexPairTy VisitCastExpr(CastExpr *E) {
+ return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType());
+ }
+ ComplexPairTy VisitCallExpr(const CallExpr *E);
+ ComplexPairTy VisitStmtExpr(const StmtExpr *E);
+
+ // Operators.
+ ComplexPairTy VisitPrePostIncDec(const UnaryOperator *E,
+ bool isInc, bool isPre) {
+ LValue LV = CGF.EmitLValue(E->getSubExpr());
+ return CGF.EmitComplexPrePostIncDec(E, LV, isInc, isPre);
+ }
+ ComplexPairTy VisitUnaryPostDec(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, false, false);
+ }
+ ComplexPairTy VisitUnaryPostInc(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, true, false);
+ }
+ ComplexPairTy VisitUnaryPreDec(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, false, true);
+ }
+ ComplexPairTy VisitUnaryPreInc(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, true, true);
+ }
+ ComplexPairTy VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitUnaryPlus (const UnaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ TestAndClearIgnoreRealAssign();
+ TestAndClearIgnoreImagAssign();
+ return Visit(E->getSubExpr());
+ }
+ ComplexPairTy VisitUnaryMinus (const UnaryOperator *E);
+ ComplexPairTy VisitUnaryNot (const UnaryOperator *E);
+ // LNot,Real,Imag never return complex.
+ ComplexPairTy VisitUnaryExtension(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+ ComplexPairTy VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ return Visit(DAE->getExpr());
+ }
+ ComplexPairTy VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
+ return CGF.EmitCXXExprWithTemporaries(E).getComplexVal();
+ }
+ ComplexPairTy VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
+ assert(E->getType()->isAnyComplexType() && "Expected complex type!");
+ QualType Elem = E->getType()->getAs<ComplexType>()->getElementType();
+ llvm::Constant *Null = llvm::Constant::getNullValue(CGF.ConvertType(Elem));
+ return ComplexPairTy(Null, Null);
+ }
+ ComplexPairTy VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+ assert(E->getType()->isAnyComplexType() && "Expected complex type!");
+ QualType Elem = E->getType()->getAs<ComplexType>()->getElementType();
+ llvm::Constant *Null =
+ llvm::Constant::getNullValue(CGF.ConvertType(Elem));
+ return ComplexPairTy(Null, Null);
+ }
+
+ struct BinOpInfo {
+ ComplexPairTy LHS;
+ ComplexPairTy RHS;
+ QualType Ty; // Computation Type.
+ };
+
+ BinOpInfo EmitBinOps(const BinaryOperator *E);
+ ComplexPairTy EmitCompoundAssign(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)
+ (const BinOpInfo &));
+
+ ComplexPairTy EmitBinAdd(const BinOpInfo &Op);
+ ComplexPairTy EmitBinSub(const BinOpInfo &Op);
+ ComplexPairTy EmitBinMul(const BinOpInfo &Op);
+ ComplexPairTy EmitBinDiv(const BinOpInfo &Op);
+
+ ComplexPairTy VisitBinMul(const BinaryOperator *E) {
+ return EmitBinMul(EmitBinOps(E));
+ }
+ ComplexPairTy VisitBinAdd(const BinaryOperator *E) {
+ return EmitBinAdd(EmitBinOps(E));
+ }
+ ComplexPairTy VisitBinSub(const BinaryOperator *E) {
+ return EmitBinSub(EmitBinOps(E));
+ }
+ ComplexPairTy VisitBinDiv(const BinaryOperator *E) {
+ return EmitBinDiv(EmitBinOps(E));
+ }
+
+ // Compound assignments.
+ ComplexPairTy VisitBinAddAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinAdd);
+ }
+ ComplexPairTy VisitBinSubAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinSub);
+ }
+ ComplexPairTy VisitBinMulAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinMul);
+ }
+ ComplexPairTy VisitBinDivAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinDiv);
+ }
+
+ // GCC rejects rem/and/or/xor for integer complex.
+ // Logical and/or always return int, never complex.
+
+ // No comparisons produce a complex result.
+ ComplexPairTy VisitBinAssign (const BinaryOperator *E);
+ ComplexPairTy VisitBinComma (const BinaryOperator *E);
+
+
+ ComplexPairTy VisitConditionalOperator(const ConditionalOperator *CO);
+ ComplexPairTy VisitChooseExpr(ChooseExpr *CE);
+
+ ComplexPairTy VisitInitListExpr(InitListExpr *E);
+
+ ComplexPairTy VisitVAArgExpr(VAArgExpr *E);
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitLoadOfComplex - Given a pointer to a complex value, emit code to
+/// load the real and imaginary pieces, returning them as Real/Imag.
+ComplexPairTy ComplexExprEmitter::EmitLoadOfComplex(llvm::Value *SrcPtr,
+ bool isVolatile) {
+ llvm::Value *Real=0, *Imag=0;
+
+ if (!IgnoreReal) {
+ llvm::Value *RealP = Builder.CreateStructGEP(SrcPtr, 0,
+ SrcPtr->getName() + ".realp");
+ Real = Builder.CreateLoad(RealP, isVolatile, SrcPtr->getName() + ".real");
+ }
+
+ if (!IgnoreImag) {
+ llvm::Value *ImagP = Builder.CreateStructGEP(SrcPtr, 1,
+ SrcPtr->getName() + ".imagp");
+ Imag = Builder.CreateLoad(ImagP, isVolatile, SrcPtr->getName() + ".imag");
+ }
+ return ComplexPairTy(Real, Imag);
+}
+
+/// EmitStoreOfComplex - Store the specified real/imag parts into the
+/// specified value pointer.
+void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *Ptr,
+ bool isVolatile) {
+ llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, "real");
+ llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, "imag");
+
+ Builder.CreateStore(Val.first, RealPtr, isVolatile);
+ Builder.CreateStore(Val.second, ImagPtr, isVolatile);
+}
+
+
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+ComplexPairTy ComplexExprEmitter::VisitExpr(Expr *E) {
+ CGF.ErrorUnsupported(E, "complex expression");
+ const llvm::Type *EltTy =
+ CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return ComplexPairTy(U, U);
+}
+
+ComplexPairTy ComplexExprEmitter::
+VisitImaginaryLiteral(const ImaginaryLiteral *IL) {
+ llvm::Value *Imag = CGF.EmitScalarExpr(IL->getSubExpr());
+ return
+ ComplexPairTy(llvm::Constant::getNullValue(Imag->getType()), Imag);
+}
+
+
+ComplexPairTy ComplexExprEmitter::VisitCallExpr(const CallExpr *E) {
+ if (E->getCallReturnType()->isReferenceType())
+ return EmitLoadOfLValue(E);
+
+ return CGF.EmitCallExpr(E).getComplexVal();
+}
+
+ComplexPairTy ComplexExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ return CGF.EmitCompoundStmt(*E->getSubStmt(), true).getComplexVal();
+}
+
+/// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
+ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
+ QualType SrcType,
+ QualType DestType) {
+ // Get the src/dest element type.
+ SrcType = SrcType->getAs<ComplexType>()->getElementType();
+ DestType = DestType->getAs<ComplexType>()->getElementType();
+
+ // C99 6.3.1.6: When a value of complex type is converted to another
+ // complex type, both the real and imaginary parts follow the conversion
+ // rules for the corresponding real types.
+ Val.first = CGF.EmitScalarConversion(Val.first, SrcType, DestType);
+ Val.second = CGF.EmitScalarConversion(Val.second, SrcType, DestType);
+ return Val;
+}
+
+ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
+ QualType DestTy) {
+ // Two cases here: cast from (complex to complex) and (scalar to complex).
+ if (Op->getType()->isAnyComplexType())
+ return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
+
+ // FIXME: We should be looking at all of the cast kinds here, not
+ // cherry-picking the ones we have test cases for.
+ if (CK == CK_LValueBitCast) {
+ llvm::Value *V = CGF.EmitLValue(Op).getAddress();
+ V = Builder.CreateBitCast(V,
+ CGF.ConvertType(CGF.getContext().getPointerType(DestTy)));
+ // FIXME: Are the qualifiers correct here?
+ return EmitLoadOfComplex(V, DestTy.isVolatileQualified());
+ }
+
+ // C99 6.3.1.7: When a value of real type is converted to a complex type, the
+ // real part of the complex result value is determined by the rules of
+ // conversion to the corresponding real type and the imaginary part of the
+ // complex result value is a positive zero or an unsigned zero.
+ llvm::Value *Elt = CGF.EmitScalarExpr(Op);
+
+ // Convert the input element to the element type of the complex.
+ DestTy = DestTy->getAs<ComplexType>()->getElementType();
+ Elt = CGF.EmitScalarConversion(Elt, Op->getType(), DestTy);
+
+ // Return (realval, 0).
+ return ComplexPairTy(Elt, llvm::Constant::getNullValue(Elt->getType()));
+}
+
+ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ TestAndClearIgnoreRealAssign();
+ TestAndClearIgnoreImagAssign();
+ ComplexPairTy Op = Visit(E->getSubExpr());
+
+ llvm::Value *ResR, *ResI;
+ if (Op.first->getType()->isFloatingPointTy()) {
+ ResR = Builder.CreateFNeg(Op.first, "neg.r");
+ ResI = Builder.CreateFNeg(Op.second, "neg.i");
+ } else {
+ ResR = Builder.CreateNeg(Op.first, "neg.r");
+ ResI = Builder.CreateNeg(Op.second, "neg.i");
+ }
+ return ComplexPairTy(ResR, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ TestAndClearIgnoreRealAssign();
+ TestAndClearIgnoreImagAssign();
+ // ~(a+ib) = a + i*-b
+ ComplexPairTy Op = Visit(E->getSubExpr());
+ llvm::Value *ResI;
+ if (Op.second->getType()->isFloatingPointTy())
+ ResI = Builder.CreateFNeg(Op.second, "conj.i");
+ else
+ ResI = Builder.CreateNeg(Op.second, "conj.i");
+
+ return ComplexPairTy(Op.first, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::EmitBinAdd(const BinOpInfo &Op) {
+ llvm::Value *ResR, *ResI;
+
+ if (Op.LHS.first->getType()->isFloatingPointTy()) {
+ ResR = Builder.CreateFAdd(Op.LHS.first, Op.RHS.first, "add.r");
+ ResI = Builder.CreateFAdd(Op.LHS.second, Op.RHS.second, "add.i");
+ } else {
+ ResR = Builder.CreateAdd(Op.LHS.first, Op.RHS.first, "add.r");
+ ResI = Builder.CreateAdd(Op.LHS.second, Op.RHS.second, "add.i");
+ }
+ return ComplexPairTy(ResR, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::EmitBinSub(const BinOpInfo &Op) {
+ llvm::Value *ResR, *ResI;
+ if (Op.LHS.first->getType()->isFloatingPointTy()) {
+ ResR = Builder.CreateFSub(Op.LHS.first, Op.RHS.first, "sub.r");
+ ResI = Builder.CreateFSub(Op.LHS.second, Op.RHS.second, "sub.i");
+ } else {
+ ResR = Builder.CreateSub(Op.LHS.first, Op.RHS.first, "sub.r");
+ ResI = Builder.CreateSub(Op.LHS.second, Op.RHS.second, "sub.i");
+ }
+ return ComplexPairTy(ResR, ResI);
+}
+
+
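+// Complex multiplication uses the textbook expansion:
+//   (a+ib) * (c+id) = (ac - bd) + i(bc + ad)
+// The same expansion is emitted for floating-point and integer elements; only
+// the flavor of the add/sub/mul instructions differs.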
+ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
+ using llvm::Value;
+ Value *ResR, *ResI;
+
+ if (Op.LHS.first->getType()->isFloatingPointTy()) {
+ Value *ResRl = Builder.CreateFMul(Op.LHS.first, Op.RHS.first, "mul.rl");
+ Value *ResRr = Builder.CreateFMul(Op.LHS.second, Op.RHS.second,"mul.rr");
+ ResR = Builder.CreateFSub(ResRl, ResRr, "mul.r");
+
+ Value *ResIl = Builder.CreateFMul(Op.LHS.second, Op.RHS.first, "mul.il");
+ Value *ResIr = Builder.CreateFMul(Op.LHS.first, Op.RHS.second, "mul.ir");
+ ResI = Builder.CreateFAdd(ResIl, ResIr, "mul.i");
+ } else {
+ Value *ResRl = Builder.CreateMul(Op.LHS.first, Op.RHS.first, "mul.rl");
+ Value *ResRr = Builder.CreateMul(Op.LHS.second, Op.RHS.second,"mul.rr");
+ ResR = Builder.CreateSub(ResRl, ResRr, "mul.r");
+
+ Value *ResIl = Builder.CreateMul(Op.LHS.second, Op.RHS.first, "mul.il");
+ Value *ResIr = Builder.CreateMul(Op.LHS.first, Op.RHS.second, "mul.ir");
+ ResI = Builder.CreateAdd(ResIl, ResIr, "mul.i");
+ }
+ return ComplexPairTy(ResR, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
+ llvm::Value *LHSr = Op.LHS.first, *LHSi = Op.LHS.second;
+ llvm::Value *RHSr = Op.RHS.first, *RHSi = Op.RHS.second;
+
+
+ llvm::Value *DSTr, *DSTi;
+ if (Op.LHS.first->getType()->isFloatingPointTy()) {
+ // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
+ llvm::Value *Tmp1 = Builder.CreateFMul(LHSr, RHSr, "tmp"); // a*c
+ llvm::Value *Tmp2 = Builder.CreateFMul(LHSi, RHSi, "tmp"); // b*d
+ llvm::Value *Tmp3 = Builder.CreateFAdd(Tmp1, Tmp2, "tmp"); // ac+bd
+
+ llvm::Value *Tmp4 = Builder.CreateFMul(RHSr, RHSr, "tmp"); // c*c
+ llvm::Value *Tmp5 = Builder.CreateFMul(RHSi, RHSi, "tmp"); // d*d
+ llvm::Value *Tmp6 = Builder.CreateFAdd(Tmp4, Tmp5, "tmp"); // cc+dd
+
+ llvm::Value *Tmp7 = Builder.CreateFMul(LHSi, RHSr, "tmp"); // b*c
+ llvm::Value *Tmp8 = Builder.CreateFMul(LHSr, RHSi, "tmp"); // a*d
+ llvm::Value *Tmp9 = Builder.CreateFSub(Tmp7, Tmp8, "tmp"); // bc-ad
+
+ DSTr = Builder.CreateFDiv(Tmp3, Tmp6, "tmp");
+ DSTi = Builder.CreateFDiv(Tmp9, Tmp6, "tmp");
+ } else {
+ // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
+ llvm::Value *Tmp1 = Builder.CreateMul(LHSr, RHSr, "tmp"); // a*c
+ llvm::Value *Tmp2 = Builder.CreateMul(LHSi, RHSi, "tmp"); // b*d
+ llvm::Value *Tmp3 = Builder.CreateAdd(Tmp1, Tmp2, "tmp"); // ac+bd
+
+ llvm::Value *Tmp4 = Builder.CreateMul(RHSr, RHSr, "tmp"); // c*c
+ llvm::Value *Tmp5 = Builder.CreateMul(RHSi, RHSi, "tmp"); // d*d
+ llvm::Value *Tmp6 = Builder.CreateAdd(Tmp4, Tmp5, "tmp"); // cc+dd
+
+ llvm::Value *Tmp7 = Builder.CreateMul(LHSi, RHSr, "tmp"); // b*c
+ llvm::Value *Tmp8 = Builder.CreateMul(LHSr, RHSi, "tmp"); // a*d
+ llvm::Value *Tmp9 = Builder.CreateSub(Tmp7, Tmp8, "tmp"); // bc-ad
+
+ if (Op.Ty->getAs<ComplexType>()->getElementType()->isUnsignedIntegerType()) {
+ DSTr = Builder.CreateUDiv(Tmp3, Tmp6, "tmp");
+ DSTi = Builder.CreateUDiv(Tmp9, Tmp6, "tmp");
+ } else {
+ DSTr = Builder.CreateSDiv(Tmp3, Tmp6, "tmp");
+ DSTi = Builder.CreateSDiv(Tmp9, Tmp6, "tmp");
+ }
+ }
+
+ return ComplexPairTy(DSTr, DSTi);
+}
+
+ComplexExprEmitter::BinOpInfo
+ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ TestAndClearIgnoreRealAssign();
+ TestAndClearIgnoreImagAssign();
+ BinOpInfo Ops;
+ Ops.LHS = Visit(E->getLHS());
+ Ops.RHS = Visit(E->getRHS());
+ Ops.Ty = E->getType();
+ return Ops;
+}
+
+
+// Compound assignments.
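+// A complex 'lhs op= rhs' is emitted by converting the RHS to the computation
+// type, loading and converting the LHS, applying the operator, then
+// truncating the result back to the LHS type and storing it.  The stored
+// value is reloaded as the value of the expression, except for Objective-C
+// property assignments, which never reload after a store.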
+ComplexPairTy ComplexExprEmitter::
+EmitCompoundAssign(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ bool ignreal = TestAndClearIgnoreRealAssign();
+ bool ignimag = TestAndClearIgnoreImagAssign();
+ QualType LHSTy = E->getLHS()->getType(), RHSTy = E->getRHS()->getType();
+
+ BinOpInfo OpInfo;
+
+ // Load the RHS and LHS operands.
+ // __block variables need to have the rhs evaluated first, plus this should
+ // improve codegen a little. It is possible for the RHS to be complex or
+ // scalar.
+ OpInfo.Ty = E->getComputationResultType();
+ OpInfo.RHS = EmitCast(CK_Unknown, E->getRHS(), OpInfo.Ty);
+
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+ // We know the LHS is a complex lvalue.
+ ComplexPairTy LHSComplexPair;
+ if (LHS.isPropertyRef())
+ LHSComplexPair =
+ CGF.EmitObjCPropertyGet(LHS.getPropertyRefExpr()).getComplexVal();
+ else if (LHS.isKVCRef())
+ LHSComplexPair =
+ CGF.EmitObjCPropertyGet(LHS.getKVCRefExpr()).getComplexVal();
+ else
+ LHSComplexPair = EmitLoadOfComplex(LHS.getAddress(),
+ LHS.isVolatileQualified());
+
+ OpInfo.LHS = EmitComplexToComplexCast(LHSComplexPair, LHSTy, OpInfo.Ty);
+
+ // Expand the binary operator.
+ ComplexPairTy Result = (this->*Func)(OpInfo);
+
+ // Truncate the result back to the LHS type.
+ Result = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
+
+ // Store the result value into the LHS lvalue.
+ if (LHS.isPropertyRef())
+ CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(),
+ RValue::getComplex(Result));
+ else if (LHS.isKVCRef())
+ CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(), RValue::getComplex(Result));
+ else
+ EmitStoreOfComplex(Result, LHS.getAddress(), LHS.isVolatileQualified());
+
+ // Restore the Ignore* flags.
+ IgnoreReal = ignreal;
+ IgnoreImag = ignimag;
+ IgnoreRealAssign = ignreal;
+ IgnoreImagAssign = ignimag;
+
+ // Objective-C property assignment never reloads the value following a store.
+ if (LHS.isPropertyRef() || LHS.isKVCRef())
+ return Result;
+
+ // Otherwise, reload the value.
+ return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified());
+}
+
+ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ bool ignreal = TestAndClearIgnoreRealAssign();
+ bool ignimag = TestAndClearIgnoreImagAssign();
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
+ E->getRHS()->getType()) &&
+ "Invalid assignment");
+ // Emit the RHS.
+ ComplexPairTy Val = Visit(E->getRHS());
+
+ // Compute the address to store into.
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+
+ // Store the result value into the LHS lvalue.
+ if (LHS.isPropertyRef())
+ CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(), RValue::getComplex(Val));
+ else if (LHS.isKVCRef())
+ CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(), RValue::getComplex(Val));
+ else
+ EmitStoreOfComplex(Val, LHS.getAddress(), LHS.isVolatileQualified());
+
+ // Restore the Ignore* flags.
+ IgnoreReal = ignreal;
+ IgnoreImag = ignimag;
+ IgnoreRealAssign = ignreal;
+ IgnoreImagAssign = ignimag;
+
+ // Objective-C property assignment never reloads the value following a store.
+ if (LHS.isPropertyRef() || LHS.isKVCRef())
+ return Val;
+
+ // Otherwise, reload the value.
+ return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified());
+}
+
+ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) {
+ CGF.EmitStmt(E->getLHS());
+ CGF.EnsureInsertPoint();
+ return Visit(E->getRHS());
+}
+
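+// The conditional operator is emitted with explicit control flow: the
+// condition branches to cond.true / cond.false, each arm computes its complex
+// value, and two PHI nodes in cond.end merge the real and imaginary parts
+// separately.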
+ComplexPairTy ComplexExprEmitter::
+VisitConditionalOperator(const ConditionalOperator *E) {
+ if (!E->getLHS()) {
+ CGF.ErrorUnsupported(E, "conditional operator with missing LHS");
+ const llvm::Type *EltTy =
+ CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return ComplexPairTy(U, U);
+ }
+
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ TestAndClearIgnoreRealAssign();
+ TestAndClearIgnoreImagAssign();
+ llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+
+ CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+
+ CGF.EmitBlock(LHSBlock);
+
+  // The GNU extension for a missing LHS was handled above (ErrorUnsupported),
+  // so at this point the LHS must be present.
+  assert(E->getLHS() && "Must have LHS for complex value");
+
+ ComplexPairTy LHS = Visit(E->getLHS());
+ LHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(RHSBlock);
+
+ ComplexPairTy RHS = Visit(E->getRHS());
+ RHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(ContBlock);
+
+ // Create a PHI node for the real part.
+ llvm::PHINode *RealPN = Builder.CreatePHI(LHS.first->getType(), "cond.r");
+ RealPN->reserveOperandSpace(2);
+ RealPN->addIncoming(LHS.first, LHSBlock);
+ RealPN->addIncoming(RHS.first, RHSBlock);
+
+ // Create a PHI node for the imaginary part.
+ llvm::PHINode *ImagPN = Builder.CreatePHI(LHS.first->getType(), "cond.i");
+ ImagPN->reserveOperandSpace(2);
+ ImagPN->addIncoming(LHS.second, LHSBlock);
+ ImagPN->addIncoming(RHS.second, RHSBlock);
+
+ return ComplexPairTy(RealPN, ImagPN);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitChooseExpr(ChooseExpr *E) {
+ return Visit(E->getChosenSubExpr(CGF.getContext()));
+}
+
+ComplexPairTy ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) {
+  bool Ignore = TestAndClearIgnoreReal();
+  (void)Ignore;
+  assert(Ignore == false && "init list ignored");
+  Ignore = TestAndClearIgnoreImag();
+  (void)Ignore;
+  assert(Ignore == false && "init list ignored");
+ if (E->getNumInits())
+ return Visit(E->getInit(0));
+
+  // Empty init list initializes to null.
+ QualType Ty = E->getType()->getAs<ComplexType>()->getElementType();
+ const llvm::Type* LTy = CGF.ConvertType(Ty);
+ llvm::Value* zeroConstant = llvm::Constant::getNullValue(LTy);
+ return ComplexPairTy(zeroConstant, zeroConstant);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
+ llvm::Value *ArgValue = CGF.EmitVAListRef(E->getSubExpr());
+ llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, E->getType());
+
+ if (!ArgPtr) {
+ CGF.ErrorUnsupported(E, "complex va_arg expression");
+ const llvm::Type *EltTy =
+ CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return ComplexPairTy(U, U);
+ }
+
+ // FIXME Volatility.
+ return EmitLoadOfComplex(ArgPtr, false);
+}
+
+//===----------------------------------------------------------------------===//
+// Entry Point into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitComplexExpr - Emit the computation of the specified expression of
+/// complex type, returning the result.  The Ignore* flags indicate which
+/// components of the result are known to be unused and may be skipped.
+ComplexPairTy CodeGenFunction::EmitComplexExpr(const Expr *E, bool IgnoreReal,
+ bool IgnoreImag, bool IgnoreRealAssign, bool IgnoreImagAssign) {
+ assert(E && E->getType()->isAnyComplexType() &&
+ "Invalid complex expression to emit");
+
+ return ComplexExprEmitter(*this, IgnoreReal, IgnoreImag, IgnoreRealAssign,
+ IgnoreImagAssign)
+ .Visit(const_cast<Expr*>(E));
+}
+
+/// EmitComplexExprIntoAddr - Emit the computation of the specified expression
+/// of complex type, storing into the specified Value*.
+void CodeGenFunction::EmitComplexExprIntoAddr(const Expr *E,
+ llvm::Value *DestAddr,
+ bool DestIsVolatile) {
+ assert(E && E->getType()->isAnyComplexType() &&
+ "Invalid complex expression to emit");
+ ComplexExprEmitter Emitter(*this);
+ ComplexPairTy Val = Emitter.Visit(const_cast<Expr*>(E));
+ Emitter.EmitStoreOfComplex(Val, DestAddr, DestIsVolatile);
+}
+
+/// StoreComplexToAddr - Store a complex number into the specified address.
+void CodeGenFunction::StoreComplexToAddr(ComplexPairTy V,
+ llvm::Value *DestAddr,
+ bool DestIsVolatile) {
+ ComplexExprEmitter(*this).EmitStoreOfComplex(V, DestAddr, DestIsVolatile);
+}
+
+/// LoadComplexFromAddr - Load a complex number from the specified address.
+ComplexPairTy CodeGenFunction::LoadComplexFromAddr(llvm::Value *SrcAddr,
+ bool SrcIsVolatile) {
+ return ComplexExprEmitter(*this).EmitLoadOfComplex(SrcAddr, SrcIsVolatile);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
new file mode 100644
index 0000000..9c31c2a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
@@ -0,0 +1,1122 @@
+//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Constant Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGCXXABI.h"
+#include "CGObjCRuntime.h"
+#include "CGRecordLayout.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/Builtins.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// ConstStructBuilder
+//===----------------------------------------------------------------------===//
+
+namespace {
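+// ConstStructBuilder lays out a struct InitListExpr as an LLVM constant.  It
+// appends field initializers and undef padding while tracking
+// NextFieldOffsetInBytes; if a field's natural alignment would place it past
+// its AST record-layout offset, the whole struct is converted to a packed
+// struct and layout continues byte by byte.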
+class ConstStructBuilder {
+ CodeGenModule &CGM;
+ CodeGenFunction *CGF;
+
+ bool Packed;
+ unsigned NextFieldOffsetInBytes;
+ unsigned LLVMStructAlignment;
+ std::vector<llvm::Constant *> Elements;
+public:
+ static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
+ InitListExpr *ILE);
+
+private:
+ ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
+ : CGM(CGM), CGF(CGF), Packed(false), NextFieldOffsetInBytes(0),
+ LLVMStructAlignment(1) { }
+
+ bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::Constant *InitExpr);
+
+ void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::ConstantInt *InitExpr);
+
+ void AppendPadding(uint64_t NumBytes);
+
+ void AppendTailPadding(uint64_t RecordSize);
+
+ void ConvertStructToPacked();
+
+ bool Build(InitListExpr *ILE);
+
+ unsigned getAlignment(const llvm::Constant *C) const {
+ if (Packed) return 1;
+ return CGM.getTargetData().getABITypeAlignment(C->getType());
+ }
+
+ uint64_t getSizeInBytes(const llvm::Constant *C) const {
+ return CGM.getTargetData().getTypeAllocSize(C->getType());
+ }
+};
+
+bool ConstStructBuilder::
+AppendField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::Constant *InitCst) {
+ uint64_t FieldOffsetInBytes = FieldOffset / 8;
+
+ assert(NextFieldOffsetInBytes <= FieldOffsetInBytes
+ && "Field offset mismatch!");
+
+ unsigned FieldAlignment = getAlignment(InitCst);
+
+ // Round up the field offset to the alignment of the field type.
+ uint64_t AlignedNextFieldOffsetInBytes =
+ llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);
+
+ if (AlignedNextFieldOffsetInBytes > FieldOffsetInBytes) {
+ assert(!Packed && "Alignment is wrong even with a packed struct!");
+
+ // Convert the struct to a packed struct.
+ ConvertStructToPacked();
+
+ AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
+ }
+
+ if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
+ // We need to append padding.
+ AppendPadding(FieldOffsetInBytes - NextFieldOffsetInBytes);
+
+ assert(NextFieldOffsetInBytes == FieldOffsetInBytes &&
+ "Did not add enough padding!");
+
+ AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
+ }
+
+ // Add the field.
+ Elements.push_back(InitCst);
+ NextFieldOffsetInBytes = AlignedNextFieldOffsetInBytes +
+ getSizeInBytes(InitCst);
+
+ if (Packed)
+ assert(LLVMStructAlignment == 1 && "Packed struct not byte-aligned!");
+ else
+ LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);
+
+ return true;
+}
+
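+// AppendBitField emits a bit-field initializer one byte at a time.  Bits that
+// share a byte with the previous field are merged ('or'ed) into the last
+// element; the remaining bits are appended as i8 constants, honoring the
+// target's endianness.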
+void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
+ uint64_t FieldOffset,
+ llvm::ConstantInt *CI) {
+ if (FieldOffset > NextFieldOffsetInBytes * 8) {
+ // We need to add padding.
+ uint64_t NumBytes =
+ llvm::RoundUpToAlignment(FieldOffset -
+ NextFieldOffsetInBytes * 8, 8) / 8;
+
+ AppendPadding(NumBytes);
+ }
+
+ uint64_t FieldSize =
+ Field->getBitWidth()->EvaluateAsInt(CGM.getContext()).getZExtValue();
+
+ llvm::APInt FieldValue = CI->getValue();
+
+ // Promote the size of FieldValue if necessary
+ // FIXME: This should never occur, but currently it can because initializer
+ // constants are cast to bool, and because clang is not enforcing bitfield
+ // width limits.
+ if (FieldSize > FieldValue.getBitWidth())
+ FieldValue.zext(FieldSize);
+
+ // Truncate the size of FieldValue to the bit field size.
+ if (FieldSize < FieldValue.getBitWidth())
+ FieldValue.trunc(FieldSize);
+
+ if (FieldOffset < NextFieldOffsetInBytes * 8) {
+ // Either part of the field or the entire field can go into the previous
+ // byte.
+ assert(!Elements.empty() && "Elements can't be empty!");
+
+ unsigned BitsInPreviousByte =
+ NextFieldOffsetInBytes * 8 - FieldOffset;
+
+ bool FitsCompletelyInPreviousByte =
+ BitsInPreviousByte >= FieldValue.getBitWidth();
+
+ llvm::APInt Tmp = FieldValue;
+
+ if (!FitsCompletelyInPreviousByte) {
+ unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;
+
+ if (CGM.getTargetData().isBigEndian()) {
+ Tmp = Tmp.lshr(NewFieldWidth);
+ Tmp.trunc(BitsInPreviousByte);
+
+ // We want the remaining high bits.
+ FieldValue.trunc(NewFieldWidth);
+ } else {
+ Tmp.trunc(BitsInPreviousByte);
+
+ // We want the remaining low bits.
+ FieldValue = FieldValue.lshr(BitsInPreviousByte);
+ FieldValue.trunc(NewFieldWidth);
+ }
+ }
+
+ Tmp.zext(8);
+ if (CGM.getTargetData().isBigEndian()) {
+ if (FitsCompletelyInPreviousByte)
+ Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
+ } else {
+ Tmp = Tmp.shl(8 - BitsInPreviousByte);
+ }
+
+ // 'or' in the bits that go into the previous byte.
+ llvm::Value *LastElt = Elements.back();
+ if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(LastElt))
+ Tmp |= Val->getValue();
+ else {
+ assert(isa<llvm::UndefValue>(LastElt));
+ // If there is an undef field that we're adding to, it can either be a
+ // scalar undef (in which case, we just replace it with our field) or it
+ // is an array. If it is an array, we have to pull one byte off the
+ // array so that the other undef bytes stay around.
+ if (!isa<llvm::IntegerType>(LastElt->getType())) {
+        // The undef padding will be a multibyte array; create a new, smaller
+        // padding and then a hole for our i8 to get plopped into.
+ assert(isa<llvm::ArrayType>(LastElt->getType()) &&
+ "Expected array padding of undefs");
+ const llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
+ assert(AT->getElementType()->isIntegerTy(8) &&
+ AT->getNumElements() != 0 &&
+ "Expected non-empty array padding of undefs");
+
+ // Remove the padding array.
+ NextFieldOffsetInBytes -= AT->getNumElements();
+ Elements.pop_back();
+
+ // Add the padding back in two chunks.
+ AppendPadding(AT->getNumElements()-1);
+ AppendPadding(1);
+ assert(isa<llvm::UndefValue>(Elements.back()) &&
+ Elements.back()->getType()->isIntegerTy(8) &&
+ "Padding addition didn't work right");
+ }
+ }
+
+ Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);
+
+ if (FitsCompletelyInPreviousByte)
+ return;
+ }
+
+ while (FieldValue.getBitWidth() > 8) {
+ llvm::APInt Tmp;
+
+ if (CGM.getTargetData().isBigEndian()) {
+ // We want the high bits.
+ Tmp = FieldValue;
+ Tmp = Tmp.lshr(Tmp.getBitWidth() - 8);
+ Tmp.trunc(8);
+ } else {
+ // We want the low bits.
+ Tmp = FieldValue;
+ Tmp.trunc(8);
+
+ FieldValue = FieldValue.lshr(8);
+ }
+
+ Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
+ NextFieldOffsetInBytes++;
+
+ FieldValue.trunc(FieldValue.getBitWidth() - 8);
+ }
+
+ assert(FieldValue.getBitWidth() > 0 &&
+ "Should have at least one bit left!");
+ assert(FieldValue.getBitWidth() <= 8 &&
+ "Should not have more than a byte left!");
+
+ if (FieldValue.getBitWidth() < 8) {
+ if (CGM.getTargetData().isBigEndian()) {
+ unsigned BitWidth = FieldValue.getBitWidth();
+
+ FieldValue.zext(8);
+ FieldValue = FieldValue << (8 - BitWidth);
+ } else
+ FieldValue.zext(8);
+ }
+
+ // Append the last element.
+ Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
+ FieldValue));
+ NextFieldOffsetInBytes++;
+}
+
+void ConstStructBuilder::AppendPadding(uint64_t NumBytes) {
+ if (!NumBytes)
+ return;
+
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ if (NumBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumBytes);
+
+ llvm::Constant *C = llvm::UndefValue::get(Ty);
+ Elements.push_back(C);
+ assert(getAlignment(C) == 1 && "Padding must have 1 byte alignment!");
+
+ NextFieldOffsetInBytes += getSizeInBytes(C);
+}
+
+void ConstStructBuilder::AppendTailPadding(uint64_t RecordSize) {
+ assert(RecordSize % 8 == 0 && "Invalid record size!");
+
+ uint64_t RecordSizeInBytes = RecordSize / 8;
+ assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");
+
+ unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
+ AppendPadding(NumPadBytes);
+}
+
+void ConstStructBuilder::ConvertStructToPacked() {
+ std::vector<llvm::Constant *> PackedElements;
+ uint64_t ElementOffsetInBytes = 0;
+
+ for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
+ llvm::Constant *C = Elements[i];
+
+ unsigned ElementAlign =
+ CGM.getTargetData().getABITypeAlignment(C->getType());
+ uint64_t AlignedElementOffsetInBytes =
+ llvm::RoundUpToAlignment(ElementOffsetInBytes, ElementAlign);
+
+ if (AlignedElementOffsetInBytes > ElementOffsetInBytes) {
+ // We need some padding.
+ uint64_t NumBytes =
+ AlignedElementOffsetInBytes - ElementOffsetInBytes;
+
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ if (NumBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumBytes);
+
+ llvm::Constant *Padding = llvm::UndefValue::get(Ty);
+ PackedElements.push_back(Padding);
+ ElementOffsetInBytes += getSizeInBytes(Padding);
+ }
+
+ PackedElements.push_back(C);
+ ElementOffsetInBytes += getSizeInBytes(C);
+ }
+
+ assert(ElementOffsetInBytes == NextFieldOffsetInBytes &&
+ "Packing the struct changed its size!");
+
+ Elements = PackedElements;
+ LLVMStructAlignment = 1;
+ Packed = true;
+}
+
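+// Build walks the record's fields in declaration order, pairing each field
+// with its initializer from the InitListExpr (or a null constant when the
+// initializer list is shorter), skipping anonymous bit-fields and
+// uninitialized union members, and finishes by converting to a packed struct
+// when needed and appending any tail padding.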
+bool ConstStructBuilder::Build(InitListExpr *ILE) {
+ RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ unsigned FieldNo = 0;
+ unsigned ElementNo = 0;
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+
+ // If this is a union, skip all the fields that aren't being initialized.
+ if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
+ continue;
+
+ // Don't emit anonymous bitfields, they just affect layout.
+ if (Field->isBitField() && !Field->getIdentifier())
+ continue;
+
+    // Get the initializer.  A struct can include fields without initializers;
+    // we just use explicit null values for them.
+ llvm::Constant *EltInit;
+ if (ElementNo < ILE->getNumInits())
+ EltInit = CGM.EmitConstantExpr(ILE->getInit(ElementNo++),
+ Field->getType(), CGF);
+ else
+ EltInit = CGM.EmitNullConstant(Field->getType());
+
+ if (!EltInit)
+ return false;
+
+ if (!Field->isBitField()) {
+ // Handle non-bitfield members.
+ if (!AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit))
+ return false;
+ } else {
+ // Otherwise we have a bitfield.
+ AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
+ cast<llvm::ConstantInt>(EltInit));
+ }
+ }
+
+ uint64_t LayoutSizeInBytes = Layout.getSize() / 8;
+
+ if (NextFieldOffsetInBytes > LayoutSizeInBytes) {
+ // If the struct is bigger than the size of the record type,
+ // we must have a flexible array member at the end.
+ assert(RD->hasFlexibleArrayMember() &&
+ "Must have flexible array member if struct is bigger than type!");
+
+ // No tail padding is necessary.
+ return true;
+ }
+
+ uint64_t LLVMSizeInBytes = llvm::RoundUpToAlignment(NextFieldOffsetInBytes,
+ LLVMStructAlignment);
+
+ // Check if we need to convert the struct to a packed struct.
+ if (NextFieldOffsetInBytes <= LayoutSizeInBytes &&
+ LLVMSizeInBytes > LayoutSizeInBytes) {
+ assert(!Packed && "Size mismatch!");
+
+ ConvertStructToPacked();
+ assert(NextFieldOffsetInBytes <= LayoutSizeInBytes &&
+ "Converting to packed did not help!");
+ }
+
+ // Append tail padding if necessary.
+ AppendTailPadding(Layout.getSize());
+
+ assert(Layout.getSize() / 8 == NextFieldOffsetInBytes &&
+ "Tail padding mismatch!");
+
+ return true;
+}
+
+llvm::Constant *ConstStructBuilder::
+ BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF, InitListExpr *ILE) {
+ ConstStructBuilder Builder(CGM, CGF);
+
+ if (!Builder.Build(ILE))
+ return 0;
+
+ llvm::Constant *Result =
+ llvm::ConstantStruct::get(CGM.getLLVMContext(),
+ Builder.Elements, Builder.Packed);
+
+ assert(llvm::RoundUpToAlignment(Builder.NextFieldOffsetInBytes,
+ Builder.getAlignment(Result)) ==
+ Builder.getSizeInBytes(Result) && "Size mismatch!");
+
+ return Result;
+}
+
+
+//===----------------------------------------------------------------------===//
+// ConstExprEmitter
+//===----------------------------------------------------------------------===//
+
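+// ConstExprEmitter folds expressions that the generic constant evaluator does
+// not handle into LLVM constants: compound literals, init lists, casts to
+// union, address-of-label arithmetic, member pointer constants, etc.  Each
+// visitor returns null when the expression cannot be emitted as a constant.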
+class ConstExprEmitter :
+ public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
+ CodeGenModule &CGM;
+ CodeGenFunction *CGF;
+ llvm::LLVMContext &VMContext;
+public:
+ ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
+ : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ llvm::Constant *VisitStmt(Stmt *S) {
+ return 0;
+ }
+
+ llvm::Constant *VisitParenExpr(ParenExpr *PE) {
+ return Visit(PE->getSubExpr());
+ }
+
+ llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ return Visit(E->getInitializer());
+ }
+
+ llvm::Constant *VisitUnaryAddrOf(UnaryOperator *E) {
+ if (const MemberPointerType *MPT =
+ E->getType()->getAs<MemberPointerType>()) {
+ DeclRefExpr *DRE = cast<DeclRefExpr>(E->getSubExpr());
+ NamedDecl *ND = DRE->getDecl();
+ if (MPT->isMemberFunctionPointer())
+ return CGM.getCXXABI().EmitMemberPointer(cast<CXXMethodDecl>(ND));
+ else
+ return CGM.getCXXABI().EmitMemberPointer(cast<FieldDecl>(ND));
+ }
+
+ return 0;
+ }
+
+ llvm::Constant *VisitBinSub(BinaryOperator *E) {
+ // This must be a pointer/pointer subtraction. This only happens for
+ // address of label.
+ if (!isa<AddrLabelExpr>(E->getLHS()->IgnoreParenNoopCasts(CGM.getContext())) ||
+ !isa<AddrLabelExpr>(E->getRHS()->IgnoreParenNoopCasts(CGM.getContext())))
+ return 0;
+
+ llvm::Constant *LHS = CGM.EmitConstantExpr(E->getLHS(),
+ E->getLHS()->getType(), CGF);
+ llvm::Constant *RHS = CGM.EmitConstantExpr(E->getRHS(),
+ E->getRHS()->getType(), CGF);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ LHS = llvm::ConstantExpr::getPtrToInt(LHS, ResultType);
+ RHS = llvm::ConstantExpr::getPtrToInt(RHS, ResultType);
+
+ // No need to divide by element size, since addr of label is always void*,
+ // which has size 1 in GNUish.
+ return llvm::ConstantExpr::getSub(LHS, RHS);
+ }
+
+ llvm::Constant *VisitCastExpr(CastExpr* E) {
+ switch (E->getCastKind()) {
+ case CK_ToUnion: {
+ // GCC cast to union extension
+ assert(E->getType()->isUnionType() &&
+ "Destination type is not union type!");
+ const llvm::Type *Ty = ConvertType(E->getType());
+ Expr *SubExpr = E->getSubExpr();
+
+ llvm::Constant *C =
+ CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
+ if (!C)
+ return 0;
+
+ // Build a struct with the union sub-element as the first member,
+ // and padded to the appropriate size
+ std::vector<llvm::Constant*> Elts;
+ std::vector<const llvm::Type*> Types;
+ Elts.push_back(C);
+ Types.push_back(C->getType());
+ unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
+ unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty);
+
+ assert(CurSize <= TotalSize && "Union size mismatch!");
+ if (unsigned NumPadBytes = TotalSize - CurSize) {
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
+ if (NumPadBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumPadBytes);
+
+ Elts.push_back(llvm::UndefValue::get(Ty));
+ Types.push_back(Ty);
+ }
+
+ llvm::StructType* STy =
+ llvm::StructType::get(C->getType()->getContext(), Types, false);
+ return llvm::ConstantStruct::get(STy, Elts);
+ }
+ case CK_NullToMemberPointer: {
+ const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
+ return CGM.getCXXABI().EmitNullMemberPointer(MPT);
+ }
+
+ case CK_BaseToDerivedMemberPointer: {
+ Expr *SubExpr = E->getSubExpr();
+ llvm::Constant *C =
+ CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
+ if (!C) return 0;
+
+ return CGM.getCXXABI().EmitMemberPointerConversion(C, E);
+ }
+
+ case CK_BitCast:
+ // This must be a member function pointer cast.
+ return Visit(E->getSubExpr());
+
+ default: {
+ // FIXME: This should be handled by the CK_NoOp cast kind.
+ // Explicit and implicit no-op casts
+ QualType Ty = E->getType(), SubTy = E->getSubExpr()->getType();
+ if (CGM.getContext().hasSameUnqualifiedType(Ty, SubTy))
+ return Visit(E->getSubExpr());
+
+ // Handle integer->integer casts for address-of-label differences.
+ if (Ty->isIntegerType() && SubTy->isIntegerType() &&
+ CGF) {
+ llvm::Value *Src = Visit(E->getSubExpr());
+ if (Src == 0) return 0;
+
+ // Use EmitScalarConversion to perform the conversion.
+ return cast<llvm::Constant>(CGF->EmitScalarConversion(Src, SubTy, Ty));
+ }
+
+ return 0;
+ }
+ }
+ }
+
+ llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ return Visit(DAE->getExpr());
+ }
+
+ llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
+ unsigned NumInitElements = ILE->getNumInits();
+ if (NumInitElements == 1 &&
+ (isa<StringLiteral>(ILE->getInit(0)) ||
+ isa<ObjCEncodeExpr>(ILE->getInit(0))))
+ return Visit(ILE->getInit(0));
+
+ std::vector<llvm::Constant*> Elts;
+ const llvm::ArrayType *AType =
+ cast<llvm::ArrayType>(ConvertType(ILE->getType()));
+ const llvm::Type *ElemTy = AType->getElementType();
+ unsigned NumElements = AType->getNumElements();
+
+    // Initializing an array requires us to automatically
+    // initialize any elements that have not been initialized explicitly.
+ unsigned NumInitableElts = std::min(NumInitElements, NumElements);
+
+ // Copy initializer elements.
+ unsigned i = 0;
+ bool RewriteType = false;
+ for (; i < NumInitableElts; ++i) {
+ Expr *Init = ILE->getInit(i);
+ llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
+ if (!C)
+ return 0;
+ RewriteType |= (C->getType() != ElemTy);
+ Elts.push_back(C);
+ }
+
+ // Initialize remaining array elements.
+ // FIXME: This doesn't handle member pointers correctly!
+ for (; i < NumElements; ++i)
+ Elts.push_back(llvm::Constant::getNullValue(ElemTy));
+
+ if (RewriteType) {
+ // FIXME: Try to avoid packing the array
+ std::vector<const llvm::Type*> Types;
+ for (unsigned i = 0; i < Elts.size(); ++i)
+ Types.push_back(Elts[i]->getType());
+ const llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
+ Types, true);
+ return llvm::ConstantStruct::get(SType, Elts);
+ }
+
+ return llvm::ConstantArray::get(AType, Elts);
+ }
+
+ llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
+ return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
+ }
+
+ llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
+ return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
+ }
+
+ llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
+ return CGM.EmitNullConstant(E->getType());
+ }
+
+ llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
+ if (ILE->getType()->isScalarType()) {
+ // We have a scalar in braces. Just use the first element.
+ if (ILE->getNumInits() > 0) {
+ Expr *Init = ILE->getInit(0);
+ return CGM.EmitConstantExpr(Init, Init->getType(), CGF);
+ }
+ return CGM.EmitNullConstant(ILE->getType());
+ }
+
+ if (ILE->getType()->isArrayType())
+ return EmitArrayInitialization(ILE);
+
+ if (ILE->getType()->isRecordType())
+ return EmitStructInitialization(ILE);
+
+ if (ILE->getType()->isUnionType())
+ return EmitUnionInitialization(ILE);
+
+ // If ILE was a constant vector, we would have handled it already.
+ if (ILE->getType()->isVectorType())
+ return 0;
+
+ assert(0 && "Unable to handle InitListExpr");
+    // Suppress the "control reaches end of non-void function" warning.
+ // Not reached.
+ return 0;
+ }
+
+ llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E) {
+ if (!E->getConstructor()->isTrivial())
+ return 0;
+
+ QualType Ty = E->getType();
+
+ // FIXME: We should not have to call getBaseElementType here.
+ const RecordType *RT =
+ CGM.getContext().getBaseElementType(Ty)->getAs<RecordType>();
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ // If the class doesn't have a trivial destructor, we can't emit it as a
+ // constant expr.
+ if (!RD->hasTrivialDestructor())
+ return 0;
+
+ // Only copy and default constructors can be trivial.
+
+
+ if (E->getNumArgs()) {
+ assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
+ assert(E->getConstructor()->isCopyConstructor() &&
+ "trivial ctor has argument but isn't a copy ctor");
+
+ Expr *Arg = E->getArg(0);
+ assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
+ "argument to copy ctor is of wrong type");
+
+ return Visit(Arg);
+ }
+
+ return CGM.EmitNullConstant(Ty);
+ }
+
+ llvm::Constant *VisitStringLiteral(StringLiteral *E) {
+ assert(!E->getType()->isPointerType() && "Strings are always arrays");
+
+ // This must be a string initializing an array in a static initializer.
+ // Don't emit it as the address of the string, emit the string data itself
+ // as an inline array.
+ return llvm::ConstantArray::get(VMContext,
+ CGM.GetStringForStringLiteral(E), false);
+ }
+
+ llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
+ // This must be an @encode initializing an array in a static initializer.
+ // Don't emit it as the address of the string, emit the string data itself
+ // as an inline array.
+ std::string Str;
+ CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
+ const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());
+
+ // Resize the string to the right size, adding zeros at the end, or
+ // truncating as needed.
+ Str.resize(CAT->getSize().getZExtValue(), '\0');
+ return llvm::ConstantArray::get(VMContext, Str, false);
+ }
+
+ llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ // Utility methods
+ const llvm::Type *ConvertType(QualType T) {
+ return CGM.getTypes().ConvertType(T);
+ }
+
+public:
+ llvm::Constant *EmitLValue(Expr *E) {
+ switch (E->getStmtClass()) {
+ default: break;
+ case Expr::CompoundLiteralExprClass: {
+ // Note that due to the nature of compound literals, this is guaranteed
+ // to be the only use of the variable, so we just generate it here.
+ CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
+ llvm::Constant* C = Visit(CLE->getInitializer());
+ // FIXME: "Leaked" on failure.
+ if (C)
+ C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
+ E->getType().isConstant(CGM.getContext()),
+ llvm::GlobalValue::InternalLinkage,
+ C, ".compoundliteral", 0, false,
+ E->getType().getAddressSpace());
+ return C;
+ }
+ case Expr::DeclRefExprClass: {
+ ValueDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
+ if (Decl->hasAttr<WeakRefAttr>())
+ return CGM.GetWeakRefReference(Decl);
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
+ return CGM.GetAddrOfFunction(FD);
+ if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
+ // We can never refer to a variable with local storage.
+ if (!VD->hasLocalStorage()) {
+ if (VD->isFileVarDecl() || VD->hasExternalStorage())
+ return CGM.GetAddrOfGlobalVar(VD);
+ else if (VD->isBlockVarDecl()) {
+ assert(CGF && "Can't access static local vars without CGF");
+ return CGF->GetAddrOfStaticLocalVar(VD);
+ }
+ }
+ }
+ break;
+ }
+ case Expr::StringLiteralClass:
+ return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
+ case Expr::ObjCEncodeExprClass:
+ return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
+ case Expr::ObjCStringLiteralClass: {
+ ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
+ llvm::Constant *C =
+ CGM.getObjCRuntime().GenerateConstantString(SL->getString());
+ return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
+ }
+ case Expr::PredefinedExprClass: {
+ unsigned Type = cast<PredefinedExpr>(E)->getIdentType();
+ if (CGF) {
+ LValue Res = CGF->EmitPredefinedLValue(cast<PredefinedExpr>(E));
+ return cast<llvm::Constant>(Res.getAddress());
+ } else if (Type == PredefinedExpr::PrettyFunction) {
+ return CGM.GetAddrOfConstantCString("top level", ".tmp");
+ }
+
+ return CGM.GetAddrOfConstantCString("", ".tmp");
+ }
+ case Expr::AddrLabelExprClass: {
+ assert(CGF && "Invalid address of label expression outside function.");
+ llvm::Constant *Ptr =
+ CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
+ return llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
+ }
+ case Expr::CallExprClass: {
+ CallExpr* CE = cast<CallExpr>(E);
+ unsigned builtin = CE->isBuiltinCall(CGM.getContext());
+ if (builtin !=
+ Builtin::BI__builtin___CFStringMakeConstantString &&
+ builtin !=
+ Builtin::BI__builtin___NSStringMakeConstantString)
+ break;
+ const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
+ const StringLiteral *Literal = cast<StringLiteral>(Arg);
+ if (builtin ==
+ Builtin::BI__builtin___NSStringMakeConstantString) {
+ return CGM.getObjCRuntime().GenerateConstantString(Literal);
+ }
+ // FIXME: need to deal with UCN conversion issues.
+ return CGM.GetAddrOfConstantCFString(Literal);
+ }
+ case Expr::BlockExprClass: {
+ std::string FunctionName;
+ if (CGF)
+ FunctionName = CGF->CurFn->getName();
+ else
+ FunctionName = "global";
+
+ return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
+ }
+ }
+
+ return 0;
+ }
+};
+
+} // end anonymous namespace.
+
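+/// EmitConstantExpr - Try to emit E as an LLVM constant of type DestType.
+/// The expression is first constant-folded with Expr::Evaluate (or
+/// EvaluateAsLValue for reference destinations); if that fails, the
+/// ConstExprEmitter visitor is tried.  Returns null if E cannot be emitted as
+/// a constant.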
+llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
+ QualType DestType,
+ CodeGenFunction *CGF) {
+ Expr::EvalResult Result;
+
+ bool Success = false;
+
+ if (DestType->isReferenceType())
+ Success = E->EvaluateAsLValue(Result, Context);
+ else
+ Success = E->Evaluate(Result, Context);
+
+ if (Success && !Result.HasSideEffects) {
+ switch (Result.Val.getKind()) {
+ case APValue::Uninitialized:
+ assert(0 && "Constant expressions should be initialized.");
+ return 0;
+ case APValue::LValue: {
+ const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
+ llvm::Constant *Offset =
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
+ Result.Val.getLValueOffset().getQuantity());
+
+ llvm::Constant *C;
+ if (const Expr *LVBase = Result.Val.getLValueBase()) {
+ C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase));
+
+ // Apply offset if necessary.
+ if (!Offset->isNullValue()) {
+ const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
+ llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
+ Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1);
+ C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
+ }
+
+ // Convert to the appropriate type; this could be an lvalue for
+ // an integer.
+ if (isa<llvm::PointerType>(DestTy))
+ return llvm::ConstantExpr::getBitCast(C, DestTy);
+
+ return llvm::ConstantExpr::getPtrToInt(C, DestTy);
+ } else {
+ C = Offset;
+
+ // Convert to the appropriate type; this could be an lvalue for
+ // an integer.
+ if (isa<llvm::PointerType>(DestTy))
+ return llvm::ConstantExpr::getIntToPtr(C, DestTy);
+
+        // If the types don't match, this should only be a truncate.
+ if (C->getType() != DestTy)
+ return llvm::ConstantExpr::getTrunc(C, DestTy);
+
+ return C;
+ }
+ }
+ case APValue::Int: {
+ llvm::Constant *C = llvm::ConstantInt::get(VMContext,
+ Result.Val.getInt());
+
+ if (C->getType()->isIntegerTy(1)) {
+ const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ C = llvm::ConstantExpr::getZExt(C, BoolTy);
+ }
+ return C;
+ }
+ case APValue::ComplexInt: {
+ llvm::Constant *Complex[2];
+
+ Complex[0] = llvm::ConstantInt::get(VMContext,
+ Result.Val.getComplexIntReal());
+ Complex[1] = llvm::ConstantInt::get(VMContext,
+ Result.Val.getComplexIntImag());
+
+ // FIXME: the target may want to specify that this is packed.
+ return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
+ }
+ case APValue::Float:
+ return llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
+ case APValue::ComplexFloat: {
+ llvm::Constant *Complex[2];
+
+ Complex[0] = llvm::ConstantFP::get(VMContext,
+ Result.Val.getComplexFloatReal());
+ Complex[1] = llvm::ConstantFP::get(VMContext,
+ Result.Val.getComplexFloatImag());
+
+ // FIXME: the target may want to specify that this is packed.
+ return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
+ }
+ case APValue::Vector: {
+ llvm::SmallVector<llvm::Constant *, 4> Inits;
+ unsigned NumElts = Result.Val.getVectorLength();
+
+ for (unsigned i = 0; i != NumElts; ++i) {
+ APValue &Elt = Result.Val.getVectorElt(i);
+ if (Elt.isInt())
+ Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
+ else
+ Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
+ }
+ return llvm::ConstantVector::get(&Inits[0], Inits.size());
+ }
+ }
+ }
+
+ llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
+ if (C && C->getType()->isIntegerTy(1)) {
+ const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ C = llvm::ConstantExpr::getZExt(C, BoolTy);
+ }
+ return C;
+}
+
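+/// FillInNullDataMemberPointers - For a type that is not trivially
+/// zero-initializable, walk its arrays, bases and fields and overwrite the
+/// bytes in Elements that correspond to pointer-to-data-member subobjects
+/// with -1, the null data member pointer representation.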
+static void
+FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
+ std::vector<llvm::Constant *> &Elements,
+ uint64_t StartOffset) {
+ assert(StartOffset % 8 == 0 && "StartOffset not byte aligned!");
+
+ if (CGM.getTypes().isZeroInitializable(T))
+ return;
+
+ if (const ConstantArrayType *CAT =
+ CGM.getContext().getAsConstantArrayType(T)) {
+ QualType ElementTy = CAT->getElementType();
+ uint64_t ElementSize = CGM.getContext().getTypeSize(ElementTy);
+
+ for (uint64_t I = 0, E = CAT->getSize().getZExtValue(); I != E; ++I) {
+ FillInNullDataMemberPointers(CGM, ElementTy, Elements,
+ StartOffset + I * ElementSize);
+ }
+ } else if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ // Go through all bases and fill in any null pointer to data members.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ if (I->isVirtual()) {
+ // FIXME: We should initialize null pointer to data members in virtual
+ // bases here.
+ continue;
+ }
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore empty bases.
+ if (BaseDecl->isEmpty())
+ continue;
+
+ // Ignore bases that don't have any pointer to data members.
+ if (CGM.getTypes().isZeroInitializable(BaseDecl))
+ continue;
+
+ uint64_t BaseOffset = Layout.getBaseClassOffset(BaseDecl);
+ FillInNullDataMemberPointers(CGM, I->getType(),
+ Elements, StartOffset + BaseOffset);
+ }
+
+ // Visit all fields.
+ unsigned FieldNo = 0;
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I != E; ++I, ++FieldNo) {
+ QualType FieldType = I->getType();
+
+ if (CGM.getTypes().isZeroInitializable(FieldType))
+ continue;
+
+ uint64_t FieldOffset = StartOffset + Layout.getFieldOffset(FieldNo);
+ FillInNullDataMemberPointers(CGM, FieldType, Elements, FieldOffset);
+ }
+ } else {
+ assert(T->isMemberPointerType() && "Should only see member pointers here!");
+ assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
+ "Should only see pointers to data members here!");
+
+ uint64_t StartIndex = StartOffset / 8;
+ uint64_t EndIndex = StartIndex + CGM.getContext().getTypeSize(T) / 8;
+
+ llvm::Constant *NegativeOne =
+ llvm::ConstantInt::get(llvm::Type::getInt8Ty(CGM.getLLVMContext()),
+ -1ULL, /*isSigned=*/true);
+
+ // Fill in the null data member pointer.
+ for (uint64_t I = StartIndex; I != EndIndex; ++I)
+ Elements[I] = NegativeOne;
+ }
+}
+
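+/// EmitNullConstant - Return the constant that represents a null value of
+/// type T.  For trivially zero-initializable types this is the LLVM null
+/// value; otherwise the array or record is built element by element so that
+/// pointer-to-data-member subobjects can be filled with -1.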
+llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
+ if (getTypes().isZeroInitializable(T))
+ return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
+
+ if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
+
+ QualType ElementTy = CAT->getElementType();
+
+ llvm::Constant *Element = EmitNullConstant(ElementTy);
+ unsigned NumElements = CAT->getSize().getZExtValue();
+ std::vector<llvm::Constant *> Array(NumElements);
+ for (unsigned i = 0; i != NumElements; ++i)
+ Array[i] = Element;
+
+ const llvm::ArrayType *ATy =
+ cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
+ return llvm::ConstantArray::get(ATy, Array);
+ }
+
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const llvm::StructType *STy =
+ cast<llvm::StructType>(getTypes().ConvertTypeForMem(T));
+ unsigned NumElements = STy->getNumElements();
+ std::vector<llvm::Constant *> Elements(NumElements);
+
+ const CGRecordLayout &Layout = getTypes().getCGRecordLayout(RD);
+
+ // Go through all bases and fill in any null pointer to data members.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ if (I->isVirtual()) {
+ // FIXME: We should initialize null pointer to data members in virtual
+ // bases here.
+ continue;
+ }
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore empty bases.
+ if (BaseDecl->isEmpty())
+ continue;
+
+ // Ignore bases that don't have any pointer to data members.
+ if (getTypes().isZeroInitializable(BaseDecl))
+ continue;
+
+ // Currently, all bases are arrays of i8. Figure out how many elements
+ // this base array has.
+ unsigned BaseFieldNo = Layout.getNonVirtualBaseLLVMFieldNo(BaseDecl);
+ const llvm::ArrayType *BaseArrayTy =
+ cast<llvm::ArrayType>(STy->getElementType(BaseFieldNo));
+
+ unsigned NumBaseElements = BaseArrayTy->getNumElements();
+ std::vector<llvm::Constant *> BaseElements(NumBaseElements);
+
+ // Now fill in null data member pointers.
+ FillInNullDataMemberPointers(*this, I->getType(), BaseElements, 0);
+
+ // Now go through all other elements and zero them out.
+ if (NumBaseElements) {
+ llvm::Constant *Zero =
+ llvm::ConstantInt::get(llvm::Type::getInt8Ty(getLLVMContext()), 0);
+
+ for (unsigned I = 0; I != NumBaseElements; ++I) {
+ if (!BaseElements[I])
+ BaseElements[I] = Zero;
+ }
+ }
+
+ Elements[BaseFieldNo] = llvm::ConstantArray::get(BaseArrayTy,
+ BaseElements);
+ }
+
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I != E; ++I) {
+ const FieldDecl *FD = *I;
+
+ // Ignore bit fields.
+ if (FD->isBitField())
+ continue;
+
+ unsigned FieldNo = Layout.getLLVMFieldNo(FD);
+ Elements[FieldNo] = EmitNullConstant(FD->getType());
+ }
+
+ // Now go through all other fields and zero them out.
+ for (unsigned i = 0; i != NumElements; ++i) {
+ if (!Elements[i])
+ Elements[i] = llvm::Constant::getNullValue(STy->getElementType(i));
+ }
+
+ return llvm::ConstantStruct::get(STy, Elements);
+ }
+
+ assert(T->isMemberPointerType() && "Should only see member pointers here!");
+ assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
+ "Should only see pointers to data members here!");
+
+ // Itanium C++ ABI 2.3:
+ // A NULL pointer is represented as -1.
+ return llvm::ConstantInt::get(getTypes().ConvertTypeForMem(T), -1ULL,
+ /*isSigned=*/true);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
new file mode 100644
index 0000000..2318cc4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
@@ -0,0 +1,2280 @@
+//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
+#include "CGObjCRuntime.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Target/TargetData.h"
+#include <cstdarg>
+
+using namespace clang;
+using namespace CodeGen;
+using llvm::Value;
+
+//===----------------------------------------------------------------------===//
+// Scalar Expression Emitter
+//===----------------------------------------------------------------------===//
+
+struct BinOpInfo {
+ Value *LHS;
+ Value *RHS;
+ QualType Ty; // Computation Type.
+ BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
+  const Expr *E;  // Entire expr, for ErrorUnsupported; may not be a binop.
+};
+
+namespace {
+class ScalarExprEmitter
+ : public StmtVisitor<ScalarExprEmitter, Value*> {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+ bool IgnoreResultAssign;
+ llvm::LLVMContext &VMContext;
+public:
+
+ ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
+ : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
+ VMContext(cgf.getLLVMContext()) {
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Utilities
+ //===--------------------------------------------------------------------===//
+
+ bool TestAndClearIgnoreResultAssign() {
+ bool I = IgnoreResultAssign;
+ IgnoreResultAssign = false;
+ return I;
+ }
+
+ const llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
+ LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
+ LValue EmitCheckedLValue(const Expr *E) { return CGF.EmitCheckedLValue(E); }
+
+ Value *EmitLoadOfLValue(LValue LV, QualType T) {
+ return CGF.EmitLoadOfLValue(LV, T).getScalarVal();
+ }
+
+  /// EmitLoadOfLValue - Given an expression that represents a value l-value,
+  /// this method emits the address of the l-value, then loads and returns the
+  /// result.
+ Value *EmitLoadOfLValue(const Expr *E) {
+ return EmitLoadOfLValue(EmitCheckedLValue(E), E->getType());
+ }
+
+ /// EmitConversionToBool - Convert the specified expression value to a
+ /// boolean (i1) truth value. This is equivalent to "Val != 0".
+ Value *EmitConversionToBool(Value *Src, QualType DstTy);
+
+ /// EmitScalarConversion - Emit a conversion from the specified type to the
+ /// specified destination type, both of which are LLVM scalar types.
+ Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy);
+
+ /// EmitComplexToScalarConversion - Emit a conversion from the specified
+ /// complex type to the specified destination type, where the destination type
+ /// is an LLVM scalar type.
+ Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
+ QualType SrcTy, QualType DstTy);
+
+ /// EmitNullValue - Emit a value that corresponds to null for the given type.
+ Value *EmitNullValue(QualType Ty);
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ Value *VisitStmt(Stmt *S) {
+ S->dump(CGF.getContext().getSourceManager());
+ assert(0 && "Stmt can't have complex result type!");
+ return 0;
+ }
+ Value *VisitExpr(Expr *S);
+
+ Value *VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr()); }
+
+ // Leaves.
+ Value *VisitIntegerLiteral(const IntegerLiteral *E) {
+ return llvm::ConstantInt::get(VMContext, E->getValue());
+ }
+ Value *VisitFloatingLiteral(const FloatingLiteral *E) {
+ return llvm::ConstantFP::get(VMContext, E->getValue());
+ }
+ Value *VisitCharacterLiteral(const CharacterLiteral *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
+ Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
+ Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
+ return EmitNullValue(E->getType());
+ }
+ Value *VisitGNUNullExpr(const GNUNullExpr *E) {
+ return EmitNullValue(E->getType());
+ }
+ Value *VisitTypesCompatibleExpr(const TypesCompatibleExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()),
+ CGF.getContext().typesAreCompatible(
+ E->getArgType1(), E->getArgType2()));
+ }
+ Value *VisitOffsetOfExpr(OffsetOfExpr *E);
+ Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E);
+ Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
+ llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
+ return Builder.CreateBitCast(V, ConvertType(E->getType()));
+ }
+
+ // l-values.
+ Value *VisitDeclRefExpr(DeclRefExpr *E) {
+ Expr::EvalResult Result;
+ if (E->Evaluate(Result, CGF.getContext()) && Result.Val.isInt()) {
+ assert(!Result.HasSideEffects && "Constant declref with side-effect?!");
+ llvm::ConstantInt *CI
+ = llvm::ConstantInt::get(VMContext, Result.Val.getInt());
+ CGF.EmitDeclRefExprDbgValue(E, CI);
+ return CI;
+ }
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
+ return CGF.EmitObjCSelectorExpr(E);
+ }
+ Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
+ return CGF.EmitObjCProtocolExpr(E);
+ }
+ Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitObjCImplicitSetterGetterRefExpr(
+ ObjCImplicitSetterGetterRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ return CGF.EmitObjCMessageExpr(E).getScalarVal();
+ }
+
+ Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
+ LValue LV = CGF.EmitObjCIsaExpr(E);
+ Value *V = CGF.EmitLoadOfLValue(LV, E->getType()).getScalarVal();
+ return V;
+ }
+
+ Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
+ Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
+ Value *VisitMemberExpr(MemberExpr *E);
+ Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
+ Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+
+ Value *VisitInitListExpr(InitListExpr *E);
+
+ Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
+ return CGF.CGM.EmitNullConstant(E->getType());
+ }
+ Value *VisitCastExpr(CastExpr *E) {
+ // Make sure to evaluate VLA bounds now so that we have them for later.
+ if (E->getType()->isVariablyModifiedType())
+ CGF.EmitVLASize(E->getType());
+
+ return EmitCastExpr(E);
+ }
+ Value *EmitCastExpr(CastExpr *E);
+
+ Value *VisitCallExpr(const CallExpr *E) {
+ if (E->getCallReturnType()->isReferenceType())
+ return EmitLoadOfLValue(E);
+
+ return CGF.EmitCallExpr(E).getScalarVal();
+ }
+
+ Value *VisitStmtExpr(const StmtExpr *E);
+
+ Value *VisitBlockDeclRefExpr(const BlockDeclRefExpr *E);
+
+ // Unary Operators.
+ Value *VisitUnaryPostDec(const UnaryOperator *E) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, false, false);
+ }
+ Value *VisitUnaryPostInc(const UnaryOperator *E) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, true, false);
+ }
+ Value *VisitUnaryPreDec(const UnaryOperator *E) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, false, true);
+ }
+ Value *VisitUnaryPreInc(const UnaryOperator *E) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, true, true);
+ }
+
+ llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre);
+
+
+ Value *VisitUnaryAddrOf(const UnaryOperator *E) {
+ // If the sub-expression is an instance member reference,
+ // EmitDeclRefLValue will magically emit it with the appropriate
+ // value as the "address".
+ return EmitLValue(E->getSubExpr()).getAddress();
+ }
+ Value *VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
+ Value *VisitUnaryPlus(const UnaryOperator *E) {
+ // This differs from gcc, though, most likely due to a bug in gcc.
+ TestAndClearIgnoreResultAssign();
+ return Visit(E->getSubExpr());
+ }
+ Value *VisitUnaryMinus (const UnaryOperator *E);
+ Value *VisitUnaryNot (const UnaryOperator *E);
+ Value *VisitUnaryLNot (const UnaryOperator *E);
+ Value *VisitUnaryReal (const UnaryOperator *E);
+ Value *VisitUnaryImag (const UnaryOperator *E);
+ Value *VisitUnaryExtension(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ // C++
+ Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ return Visit(DAE->getExpr());
+ }
+ Value *VisitCXXThisExpr(CXXThisExpr *TE) {
+ return CGF.LoadCXXThis();
+ }
+
+ Value *VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
+ return CGF.EmitCXXExprWithTemporaries(E).getScalarVal();
+ }
+ Value *VisitCXXNewExpr(const CXXNewExpr *E) {
+ return CGF.EmitCXXNewExpr(E);
+ }
+ Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
+ CGF.EmitCXXDeleteExpr(E);
+ return 0;
+ }
+ Value *VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *E) {
+ return llvm::ConstantInt::get(Builder.getInt1Ty(),
+ E->EvaluateTrait(CGF.getContext()));
+ }
+
+ Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
+ // C++ [expr.pseudo]p1:
+ // The result shall only be used as the operand for the function call
+ // operator (), and the result of such a call has type void. The only
+ // effect is the evaluation of the postfix-expression before the dot or
+ // arrow.
+ CGF.EmitScalarExpr(E->getBase());
+ return 0;
+ }
+
+ Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
+ return EmitNullValue(E->getType());
+ }
+
+ Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
+ CGF.EmitCXXThrowExpr(E);
+ return 0;
+ }
+
+ // Binary Operators.
+ Value *EmitMul(const BinOpInfo &Ops) {
+ if (Ops.Ty->hasSignedIntegerRepresentation()) {
+ switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
+ case LangOptions::SOB_Defined:
+ return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(Ops);
+ }
+ }
+
+ if (Ops.LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
+ return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
+ }
+ /// Create a binary op that checks for overflow.
+ /// Currently only supports +, - and *.
+ Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
+ Value *EmitDiv(const BinOpInfo &Ops);
+ Value *EmitRem(const BinOpInfo &Ops);
+ Value *EmitAdd(const BinOpInfo &Ops);
+ Value *EmitSub(const BinOpInfo &Ops);
+ Value *EmitShl(const BinOpInfo &Ops);
+ Value *EmitShr(const BinOpInfo &Ops);
+ Value *EmitAnd(const BinOpInfo &Ops) {
+ return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
+ }
+ Value *EmitXor(const BinOpInfo &Ops) {
+ return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
+ }
+ Value *EmitOr (const BinOpInfo &Ops) {
+ return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
+ }
+
+ BinOpInfo EmitBinOps(const BinaryOperator *E);
+ LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
+ Value *&Result);
+
+ Value *EmitCompoundAssign(const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
+
+ // Binary operators and binary compound assignment operators.
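+  // Each HANDLEBINOP(OP) expands to a VisitBinOP method that evaluates both
+  // operands via EmitBinOps and forwards to EmitOP, plus a VisitBinOPAssign
+  // method that routes the compound assignment through EmitCompoundAssign.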
+#define HANDLEBINOP(OP) \
+ Value *VisitBin ## OP(const BinaryOperator *E) { \
+ return Emit ## OP(EmitBinOps(E)); \
+ } \
+ Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \
+ return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \
+ }
+ HANDLEBINOP(Mul)
+ HANDLEBINOP(Div)
+ HANDLEBINOP(Rem)
+ HANDLEBINOP(Add)
+ HANDLEBINOP(Sub)
+ HANDLEBINOP(Shl)
+ HANDLEBINOP(Shr)
+ HANDLEBINOP(And)
+ HANDLEBINOP(Xor)
+ HANDLEBINOP(Or)
+#undef HANDLEBINOP
+
+ // Comparisons.
+ Value *EmitCompare(const BinaryOperator *E, unsigned UICmpOpc,
+ unsigned SICmpOpc, unsigned FCmpOpc);
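+  // Each VISITCOMP(CODE, ...) expands to a VisitBinCODE method; EmitCompare
+  // selects the unsigned-integer, signed-integer, or floating-point predicate
+  // based on the operand type.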
+#define VISITCOMP(CODE, UI, SI, FP) \
+ Value *VisitBin##CODE(const BinaryOperator *E) { \
+ return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
+ llvm::FCmpInst::FP); }
+ VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT)
+ VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT)
+ VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE)
+ VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE)
+ VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ)
+ VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE)
+#undef VISITCOMP
+
+ Value *VisitBinAssign (const BinaryOperator *E);
+
+ Value *VisitBinLAnd (const BinaryOperator *E);
+ Value *VisitBinLOr (const BinaryOperator *E);
+ Value *VisitBinComma (const BinaryOperator *E);
+
+ Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
+ Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
+
+ // Other Operators.
+ Value *VisitBlockExpr(const BlockExpr *BE);
+ Value *VisitConditionalOperator(const ConditionalOperator *CO);
+ Value *VisitChooseExpr(ChooseExpr *CE);
+ Value *VisitVAArgExpr(VAArgExpr *VE);
+ Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
+ return CGF.EmitObjCStringLiteral(E);
+ }
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitConversionToBool - Convert the specified expression value to a
+/// boolean (i1) truth value. This is equivalent to "Val != 0".
+Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
+ assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
+
+ if (SrcType->isRealFloatingType()) {
+ // Compare against 0.0 for fp scalars.
+ llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
+ return Builder.CreateFCmpUNE(Src, Zero, "tobool");
+ }
+
+ if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
+ return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
+
+ assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
+ "Unknown scalar type to convert");
+
+ // Because of the type rules of C, we often end up computing a logical value,
+ // then zero extending it to int, then wanting it as a logical value again.
+ // Optimize this common case.
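+  // For example, in C's "if (a < b)" the comparison is emitted as an i1, zero
+  // extended to i32 because the expression has type 'int', and then converted
+  // right back to i1 for the branch; the zext is peeled off here.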
+ if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(Src)) {
+ if (ZI->getOperand(0)->getType() ==
+ llvm::Type::getInt1Ty(CGF.getLLVMContext())) {
+ Value *Result = ZI->getOperand(0);
+ // If there aren't any more uses, zap the instruction to save space.
+ // Note that there can be more uses, for example if this
+ // is the result of an assignment.
+ if (ZI->use_empty())
+ ZI->eraseFromParent();
+ return Result;
+ }
+ }
+
+ // Compare against an integer or pointer null.
+ llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
+ return Builder.CreateICmpNE(Src, Zero, "tobool");
+}
+
+/// EmitScalarConversion - Emit a conversion from the specified type to the
+/// specified destination type, both of which are LLVM scalar types.
+Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
+ QualType DstType) {
+ SrcType = CGF.getContext().getCanonicalType(SrcType);
+ DstType = CGF.getContext().getCanonicalType(DstType);
+ if (SrcType == DstType) return Src;
+
+ if (DstType->isVoidType()) return 0;
+
+ // Handle conversions to bool first, they are special: comparisons against 0.
+ if (DstType->isBooleanType())
+ return EmitConversionToBool(Src, SrcType);
+
+ const llvm::Type *DstTy = ConvertType(DstType);
+
+ // Ignore conversions like int -> uint.
+ if (Src->getType() == DstTy)
+ return Src;
+
+ // Handle pointer conversions next: pointers can only be converted to/from
+ // other pointers and integers. Check for pointer types in terms of LLVM, as
+ // some native types (like Obj-C id) may map to a pointer type.
+ if (isa<llvm::PointerType>(DstTy)) {
+ // The source value may be an integer, or a pointer.
+ if (isa<llvm::PointerType>(Src->getType()))
+ return Builder.CreateBitCast(Src, DstTy, "conv");
+
+ assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
+ // First, convert to the correct width so that we control the kind of
+ // extension.
+ const llvm::Type *MiddleTy = CGF.IntPtrTy;
+ bool InputSigned = SrcType->isSignedIntegerType();
+ llvm::Value* IntResult =
+ Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
+ // Then, cast to pointer.
+ return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
+ }
+
+ if (isa<llvm::PointerType>(Src->getType())) {
+    // Must be a ptr to int cast.
+ assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
+ return Builder.CreatePtrToInt(Src, DstTy, "conv");
+ }
+
+ // A scalar can be splatted to an extended vector of the same element type
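+  // To splat: convert the scalar to the element type, insert it into lane
+  // zero of an undef vector, and shuffle it across all lanes with an
+  // all-zero mask.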
+ if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
+ // Cast the scalar to element type
+ QualType EltTy = DstType->getAs<ExtVectorType>()->getElementType();
+ llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);
+
+ // Insert the element in element zero of an undef vector
+ llvm::Value *UnV = llvm::UndefValue::get(DstTy);
+ llvm::Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, 0);
+ UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
+
+ // Splat the element across to all elements
+ llvm::SmallVector<llvm::Constant*, 16> Args;
+ unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+ for (unsigned i = 0; i < NumElements; i++)
+ Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 0));
+
+ llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
+ llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
+ return Yay;
+ }
+
+ // Allow bitcast from vector to integer/fp of the same size.
+ if (isa<llvm::VectorType>(Src->getType()) ||
+ isa<llvm::VectorType>(DstTy))
+ return Builder.CreateBitCast(Src, DstTy, "conv");
+
+ // Finally, we have the arithmetic types: real int/float.
+ if (isa<llvm::IntegerType>(Src->getType())) {
+ bool InputSigned = SrcType->isSignedIntegerType();
+ if (isa<llvm::IntegerType>(DstTy))
+ return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
+ else if (InputSigned)
+ return Builder.CreateSIToFP(Src, DstTy, "conv");
+ else
+ return Builder.CreateUIToFP(Src, DstTy, "conv");
+ }
+
+ assert(Src->getType()->isFloatingPointTy() && "Unknown real conversion");
+ if (isa<llvm::IntegerType>(DstTy)) {
+ if (DstType->isSignedIntegerType())
+ return Builder.CreateFPToSI(Src, DstTy, "conv");
+ else
+ return Builder.CreateFPToUI(Src, DstTy, "conv");
+ }
+
+ assert(DstTy->isFloatingPointTy() && "Unknown real conversion");
+ if (DstTy->getTypeID() < Src->getType()->getTypeID())
+ return Builder.CreateFPTrunc(Src, DstTy, "conv");
+ else
+ return Builder.CreateFPExt(Src, DstTy, "conv");
+}
+
+/// EmitComplexToScalarConversion - Emit a conversion from the specified complex
+/// type to the specified destination type, where the destination type is an
+/// LLVM scalar type.
+Value *ScalarExprEmitter::
+EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
+ QualType SrcTy, QualType DstTy) {
+ // Get the source element type.
+ SrcTy = SrcTy->getAs<ComplexType>()->getElementType();
+
+ // Handle conversions to bool first, they are special: comparisons against 0.
+ if (DstTy->isBooleanType()) {
+ // Complex != 0 -> (Real != 0) | (Imag != 0)
+ Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy);
+ Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy);
+ return Builder.CreateOr(Src.first, Src.second, "tobool");
+ }
+
+ // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
+ // the imaginary part of the complex value is discarded and the value of the
+ // real part is converted according to the conversion rules for the
+  // corresponding real type."
+ return EmitScalarConversion(Src.first, SrcTy, DstTy);
+}
+
+Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
+ if (const MemberPointerType *MPT = Ty->getAs<MemberPointerType>())
+ return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
+
+ return llvm::Constant::getNullValue(ConvertType(Ty));
+}
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+Value *ScalarExprEmitter::VisitExpr(Expr *E) {
+ CGF.ErrorUnsupported(E, "scalar expression");
+ if (E->getType()->isVoidType())
+ return 0;
+ return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+}
+
+Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
+ // Vector Mask Case
+ if (E->getNumSubExprs() == 2 ||
+ (E->getNumSubExprs() == 3 && E->getExpr(2)->getType()->isVectorType())) {
+ Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
+ Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
+ Value *Mask;
+
+ const llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
+ unsigned LHSElts = LTy->getNumElements();
+
+ if (E->getNumSubExprs() == 3) {
+ Mask = CGF.EmitScalarExpr(E->getExpr(2));
+
+ // Shuffle LHS & RHS into one input vector.
+ llvm::SmallVector<llvm::Constant*, 32> concat;
+ for (unsigned i = 0; i != LHSElts; ++i) {
+ concat.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 2*i));
+ concat.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 2*i+1));
+ }
+
+ Value* CV = llvm::ConstantVector::get(concat.begin(), concat.size());
+ LHS = Builder.CreateShuffleVector(LHS, RHS, CV, "concat");
+ LHSElts *= 2;
+ } else {
+ Mask = RHS;
+ }
+
+ const llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
+ llvm::Constant* EltMask;
+
+ // Treat vec3 like vec4.
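+    // (widen the index mask so a vec3 operand wraps its indices the way a
+    // vec4 operand would).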
+ if ((LHSElts == 6) && (E->getNumSubExprs() == 3))
+ EltMask = llvm::ConstantInt::get(MTy->getElementType(),
+ (1 << llvm::Log2_32(LHSElts+2))-1);
+ else if ((LHSElts == 3) && (E->getNumSubExprs() == 2))
+ EltMask = llvm::ConstantInt::get(MTy->getElementType(),
+ (1 << llvm::Log2_32(LHSElts+1))-1);
+ else
+ EltMask = llvm::ConstantInt::get(MTy->getElementType(),
+ (1 << llvm::Log2_32(LHSElts))-1);
+
+ // Mask off the high bits of each shuffle index.
+ llvm::SmallVector<llvm::Constant *, 32> MaskV;
+ for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i)
+ MaskV.push_back(EltMask);
+
+ Value* MaskBits = llvm::ConstantVector::get(MaskV.begin(), MaskV.size());
+ Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
+
+ // newv = undef
+ // mask = mask & maskbits
+ // for each elt
+ // n = extract mask i
+ // x = extract val n
+ // newv = insert newv, x, i
+ const llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
+ MTy->getNumElements());
+ Value* NewV = llvm::UndefValue::get(RTy);
+ for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
+ Value *Indx = llvm::ConstantInt::get(CGF.Int32Ty, i);
+ Indx = Builder.CreateExtractElement(Mask, Indx, "shuf_idx");
+ Indx = Builder.CreateZExt(Indx, CGF.Int32Ty, "idx_zext");
+
+ // Handle vec3 special since the index will be off by one for the RHS.
+ if ((LHSElts == 6) && (E->getNumSubExprs() == 3)) {
+ Value *cmpIndx, *newIndx;
+ cmpIndx = Builder.CreateICmpUGT(Indx,
+ llvm::ConstantInt::get(CGF.Int32Ty, 3),
+ "cmp_shuf_idx");
+ newIndx = Builder.CreateSub(Indx, llvm::ConstantInt::get(CGF.Int32Ty,1),
+ "shuf_idx_adj");
+ Indx = Builder.CreateSelect(cmpIndx, newIndx, Indx, "sel_shuf_idx");
+ }
+ Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
+ NewV = Builder.CreateInsertElement(NewV, VExt, Indx, "shuf_ins");
+ }
+ return NewV;
+ }
+
+ Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
+ Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
+
+ // Handle vec3 special since the index will be off by one for the RHS.
+ llvm::SmallVector<llvm::Constant*, 32> indices;
+ for (unsigned i = 2; i < E->getNumSubExprs(); i++) {
+ llvm::Constant *C = cast<llvm::Constant>(CGF.EmitScalarExpr(E->getExpr(i)));
+ const llvm::VectorType *VTy = cast<llvm::VectorType>(V1->getType());
+ if (VTy->getNumElements() == 3) {
+ if (llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C)) {
+ uint64_t cVal = CI->getZExtValue();
+ if (cVal > 3) {
+ C = llvm::ConstantInt::get(C->getType(), cVal-1);
+ }
+ }
+ }
+ indices.push_back(C);
+ }
+
+ Value* SV = llvm::ConstantVector::get(indices.begin(), indices.size());
+ return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
+}
+Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
+ Expr::EvalResult Result;
+ if (E->Evaluate(Result, CGF.getContext()) && Result.Val.isInt()) {
+ if (E->isArrow())
+ CGF.EmitScalarExpr(E->getBase());
+ else
+ EmitLValue(E->getBase());
+ return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
+ }
+ return EmitLoadOfLValue(E);
+}
+
+Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ TestAndClearIgnoreResultAssign();
+
+  // Emit subscript expressions in rvalue contexts. In most cases, this just
+ // loads the lvalue formed by the subscript expr. However, we have to be
+ // careful, because the base of a vector subscript is occasionally an rvalue,
+ // so we can't get it as an lvalue.
+ if (!E->getBase()->getType()->isVectorType())
+ return EmitLoadOfLValue(E);
+
+ // Handle the vector case. The base must be a vector, the index must be an
+ // integer value.
+ Value *Base = Visit(E->getBase());
+ Value *Idx = Visit(E->getIdx());
+ bool IdxSigned = E->getIdx()->getType()->isSignedIntegerType();
+ Idx = Builder.CreateIntCast(Idx, CGF.Int32Ty, IdxSigned, "vecidxcast");
+ return Builder.CreateExtractElement(Base, Idx, "vecext");
+}
+
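+/// getMaskElt - Return element Idx of SVI's shuffle mask as an i32 constant
+/// offset by Off, preserving undef mask elements.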
+static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
+ unsigned Off, const llvm::Type *I32Ty) {
+ int MV = SVI->getMaskValue(Idx);
+ if (MV == -1)
+ return llvm::UndefValue::get(I32Ty);
+ return llvm::ConstantInt::get(I32Ty, Off+MV);
+}
+
+Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
+ bool Ignore = TestAndClearIgnoreResultAssign();
+ (void)Ignore;
+ assert (Ignore == false && "init list ignored");
+ unsigned NumInitElements = E->getNumInits();
+
+ if (E->hadArrayRangeDesignator())
+ CGF.ErrorUnsupported(E, "GNU array range designator extension");
+
+ const llvm::VectorType *VType =
+ dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
+
+ // We have a scalar in braces. Just use the first element.
+ if (!VType)
+ return Visit(E->getInit(0));
+
+ unsigned ResElts = VType->getNumElements();
+
+ // Loop over initializers collecting the Value for each, and remembering
+  // whether the source was a swizzle (ExtVectorElementExpr). This will allow
+ // us to fold the shuffle for the swizzle into the shuffle for the vector
+ // initializer, since LLVM optimizers generally do not want to touch
+ // shuffles.
+ unsigned CurIdx = 0;
+ bool VIsUndefShuffle = false;
+ llvm::Value *V = llvm::UndefValue::get(VType);
+ for (unsigned i = 0; i != NumInitElements; ++i) {
+ Expr *IE = E->getInit(i);
+ Value *Init = Visit(IE);
+ llvm::SmallVector<llvm::Constant*, 16> Args;
+
+ const llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
+
+ // Handle scalar elements. If the scalar initializer is actually one
+ // element of a different vector of the same width, use shuffle instead of
+ // extract+insert.
+ if (!VVT) {
+ if (isa<ExtVectorElementExpr>(IE)) {
+ llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
+
+ if (EI->getVectorOperandType()->getNumElements() == ResElts) {
+ llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
+ Value *LHS = 0, *RHS = 0;
+ if (CurIdx == 0) {
+ // insert into undef -> shuffle (src, undef)
+ Args.push_back(C);
+ for (unsigned j = 1; j != ResElts; ++j)
+ Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
+
+ LHS = EI->getVectorOperand();
+ RHS = V;
+ VIsUndefShuffle = true;
+ } else if (VIsUndefShuffle) {
+ // insert into undefshuffle && size match -> shuffle (v, src)
+ llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
+ for (unsigned j = 0; j != CurIdx; ++j)
+ Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
+ Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
+ ResElts + C->getZExtValue()));
+ for (unsigned j = CurIdx + 1; j != ResElts; ++j)
+ Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
+
+ LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
+ RHS = EI->getVectorOperand();
+ VIsUndefShuffle = false;
+ }
+ if (!Args.empty()) {
+ llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
+ V = Builder.CreateShuffleVector(LHS, RHS, Mask);
+ ++CurIdx;
+ continue;
+ }
+ }
+ }
+ Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, CurIdx);
+ V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
+ VIsUndefShuffle = false;
+ ++CurIdx;
+ continue;
+ }
+
+ unsigned InitElts = VVT->getNumElements();
+
+ // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
+ // input is the same width as the vector being constructed, generate an
+ // optimized shuffle of the swizzle input into the result.
+ unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
+ if (isa<ExtVectorElementExpr>(IE)) {
+ llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
+ Value *SVOp = SVI->getOperand(0);
+ const llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
+
+ if (OpTy->getNumElements() == ResElts) {
+ for (unsigned j = 0; j != CurIdx; ++j) {
+ // If the current vector initializer is a shuffle with undef, merge
+ // this shuffle directly into it.
+ if (VIsUndefShuffle) {
+ Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
+ CGF.Int32Ty));
+ } else {
+ Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
+ }
+ }
+ for (unsigned j = 0, je = InitElts; j != je; ++j)
+ Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
+ for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
+ Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
+
+ if (VIsUndefShuffle)
+ V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
+
+ Init = SVOp;
+ }
+ }
+
+ // Extend init to result vector length, and then shuffle its contribution
+ // to the vector initializer into V.
+ if (Args.empty()) {
+ for (unsigned j = 0; j != InitElts; ++j)
+ Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
+ for (unsigned j = InitElts; j != ResElts; ++j)
+ Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
+ llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
+ Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
+ Mask, "vext");
+
+ Args.clear();
+ for (unsigned j = 0; j != CurIdx; ++j)
+ Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
+ for (unsigned j = 0; j != InitElts; ++j)
+ Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j+Offset));
+ for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
+ Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
+ }
+
+ // If V is undef, make sure it ends up on the RHS of the shuffle to aid
+ // merging subsequent shuffles into this one.
+ if (CurIdx == 0)
+ std::swap(V, Init);
+ llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
+ V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
+ VIsUndefShuffle = isa<llvm::UndefValue>(Init);
+ CurIdx += InitElts;
+ }
+
+  // FIXME: evaluate codegen vs. shuffling against constant null vector.
+  //        Emit remaining default initializers.
+ const llvm::Type *EltTy = VType->getElementType();
+
+ // Emit remaining default initializers
+ for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
+ Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, CurIdx);
+ llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
+ V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
+ }
+ return V;
+}
+
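+/// ShouldNullCheckClassCastValue - Decide whether a base/derived class
+/// pointer conversion must guard against a null source value; casts of
+/// 'this', glvalue casts, and CK_UncheckedDerivedToBase conversions skip
+/// the check.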
+static bool ShouldNullCheckClassCastValue(const CastExpr *CE) {
+ const Expr *E = CE->getSubExpr();
+
+ if (CE->getCastKind() == CK_UncheckedDerivedToBase)
+ return false;
+
+ if (isa<CXXThisExpr>(E)) {
+ // We always assume that 'this' is never null.
+ return false;
+ }
+
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
+ // And that glvalue casts are never null.
+ if (ICE->getValueKind() != VK_RValue)
+ return false;
+ }
+
+ return true;
+}
+
+// EmitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
+// have to handle a broader range of conversions than explicit casts, as they
+// handle things like function to ptr-to-function decay etc.
+Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
+ Expr *E = CE->getSubExpr();
+ QualType DestTy = CE->getType();
+ CastKind Kind = CE->getCastKind();
+
+ if (!DestTy->isVoidType())
+ TestAndClearIgnoreResultAssign();
+
+ // Since almost all cast kinds apply to scalars, this switch doesn't have
+ // a default case, so the compiler will warn on a missing case. The cases
+ // are in the same order as in the CastKind enum.
+ switch (Kind) {
+ case CK_Unknown:
+ // FIXME: All casts should have a known kind!
+ //assert(0 && "Unknown cast kind!");
+ break;
+
+ case CK_LValueBitCast:
+ case CK_ObjCObjectLValueCast: {
+ Value *V = EmitLValue(E).getAddress();
+ V = Builder.CreateBitCast(V,
+ ConvertType(CGF.getContext().getPointerType(DestTy)));
+ return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy), DestTy);
+ }
+
+ case CK_AnyPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_BitCast: {
+ Value *Src = Visit(const_cast<Expr*>(E));
+ return Builder.CreateBitCast(Src, ConvertType(DestTy));
+ }
+ case CK_NoOp:
+ case CK_UserDefinedConversion:
+ return Visit(const_cast<Expr*>(E));
+
+ case CK_BaseToDerived: {
+ const CXXRecordDecl *DerivedClassDecl =
+ DestTy->getCXXRecordDeclForPointerType();
+
+ return CGF.GetAddressOfDerivedClass(Visit(E), DerivedClassDecl,
+ CE->path_begin(), CE->path_end(),
+ ShouldNullCheckClassCastValue(CE));
+ }
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBase: {
+ const RecordType *DerivedClassTy =
+ E->getType()->getAs<PointerType>()->getPointeeType()->getAs<RecordType>();
+ CXXRecordDecl *DerivedClassDecl =
+ cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+
+ return CGF.GetAddressOfBaseClass(Visit(E), DerivedClassDecl,
+ CE->path_begin(), CE->path_end(),
+ ShouldNullCheckClassCastValue(CE));
+ }
+ case CK_Dynamic: {
+ Value *V = Visit(const_cast<Expr*>(E));
+ const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
+ return CGF.EmitDynamicCast(V, DCE);
+ }
+ case CK_ToUnion:
+ assert(0 && "Should be unreachable!");
+ break;
+
+ case CK_ArrayToPointerDecay: {
+ assert(E->getType()->isArrayType() &&
+ "Array to pointer decay must have array source type!");
+
+ Value *V = EmitLValue(E).getAddress(); // Bitfields can't be arrays.
+
+ // Note that VLA pointers are always decayed, so we don't need to do
+ // anything here.
+ if (!E->getType()->isVariableArrayType()) {
+ assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
+ assert(isa<llvm::ArrayType>(cast<llvm::PointerType>(V->getType())
+ ->getElementType()) &&
+ "Expected pointer to array");
+ V = Builder.CreateStructGEP(V, 0, "arraydecay");
+ }
+
+ return V;
+ }
+ case CK_FunctionToPointerDecay:
+ return EmitLValue(E).getAddress();
+
+ case CK_NullToMemberPointer: {
+ // If the subexpression's type is the C++0x nullptr_t, emit the
+ // subexpression, which may have side effects.
+ if (E->getType()->isNullPtrType())
+ (void) Visit(E);
+
+ const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
+ return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
+ }
+
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer: {
+ Value *Src = Visit(E);
+
+ // Note that the AST doesn't distinguish between checked and
+ // unchecked member pointer conversions, so we always have to
+ // implement checked conversions here. This is inefficient when
+ // actual control flow may be required in order to perform the
+ // check, which it is for data member pointers (but not member
+ // function pointers on Itanium and ARM).
+ return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
+ }
+
+
+ case CK_ConstructorConversion:
+ assert(0 && "Should be unreachable!");
+ break;
+
+ case CK_IntegralToPointer: {
+ Value *Src = Visit(const_cast<Expr*>(E));
+
+ // First, convert to the correct width so that we control the kind of
+ // extension.
+ const llvm::Type *MiddleTy = CGF.IntPtrTy;
+ bool InputSigned = E->getType()->isSignedIntegerType();
+ llvm::Value* IntResult =
+ Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
+
+ return Builder.CreateIntToPtr(IntResult, ConvertType(DestTy));
+ }
+ case CK_PointerToIntegral: {
+ Value *Src = Visit(const_cast<Expr*>(E));
+
+ // Handle conversion to bool correctly.
+ if (DestTy->isBooleanType())
+ return EmitScalarConversion(Src, E->getType(), DestTy);
+
+ return Builder.CreatePtrToInt(Src, ConvertType(DestTy));
+ }
+ case CK_ToVoid: {
+ if (E->Classify(CGF.getContext()).isGLValue())
+ CGF.EmitLValue(E);
+ else
+ CGF.EmitAnyExpr(E, 0, false, true);
+ return 0;
+ }
+ case CK_VectorSplat: {
+ const llvm::Type *DstTy = ConvertType(DestTy);
+ Value *Elt = Visit(const_cast<Expr*>(E));
+
+ // Insert the element in element zero of an undef vector
+ llvm::Value *UnV = llvm::UndefValue::get(DstTy);
+ llvm::Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, 0);
+ UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
+
+ // Splat the element across to all elements
+ llvm::SmallVector<llvm::Constant*, 16> Args;
+ unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+ for (unsigned i = 0; i < NumElements; i++)
+ Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 0));
+
+ llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
+ llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
+ return Yay;
+ }
+ case CK_IntegralCast:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingCast:
+ return EmitScalarConversion(Visit(E), E->getType(), DestTy);
+
+ case CK_MemberPointerToBoolean: {
+ llvm::Value *MemPtr = Visit(E);
+ const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
+ return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
+ }
+ }
+
+  // Handle cases where the source is a non-complex type.
+
+ if (!CGF.hasAggregateLLVMType(E->getType())) {
+ Value *Src = Visit(const_cast<Expr*>(E));
+
+ // Use EmitScalarConversion to perform the conversion.
+ return EmitScalarConversion(Src, E->getType(), DestTy);
+ }
+
+ if (E->getType()->isAnyComplexType()) {
+ // Handle cases where the source is a complex type.
+ bool IgnoreImag = true;
+ bool IgnoreImagAssign = true;
+ bool IgnoreReal = IgnoreResultAssign;
+ bool IgnoreRealAssign = IgnoreResultAssign;
+ if (DestTy->isBooleanType())
+ IgnoreImagAssign = IgnoreImag = false;
+ else if (DestTy->isVoidType()) {
+ IgnoreReal = IgnoreImag = false;
+ IgnoreRealAssign = IgnoreImagAssign = true;
+ }
+ CodeGenFunction::ComplexPairTy V
+ = CGF.EmitComplexExpr(E, IgnoreReal, IgnoreImag, IgnoreRealAssign,
+ IgnoreImagAssign);
+ return EmitComplexToScalarConversion(V, E->getType(), DestTy);
+ }
+
+ // Okay, this is a cast from an aggregate. It must be a cast to void. Just
+ // evaluate the result and return.
+ CGF.EmitAggExpr(E, 0, false, true);
+ return 0;
+}
+
+Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ return CGF.EmitCompoundStmt(*E->getSubStmt(),
+ !E->getType()->isVoidType()).getScalarVal();
+}
+
+Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
+ llvm::Value *V = CGF.GetAddrOfBlockDecl(E);
+ if (E->getType().isObjCGCWeak())
+ return CGF.CGM.getObjCRuntime().EmitObjCWeakRead(CGF, V);
+ return CGF.EmitLoadOfScalar(V, false, 0, E->getType());
+}
+
+//===----------------------------------------------------------------------===//
+// Unary Operators
+//===----------------------------------------------------------------------===//
+
+llvm::Value *ScalarExprEmitter::
+EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+
+ QualType ValTy = E->getSubExpr()->getType();
+ llvm::Value *InVal = EmitLoadOfLValue(LV, ValTy);
+
+ int AmountVal = isInc ? 1 : -1;
+
+ if (ValTy->isPointerType() &&
+ ValTy->getAs<PointerType>()->isVariableArrayType()) {
+ // The amount of the addition/subtraction needs to account for the VLA size
+ CGF.ErrorUnsupported(E, "VLA pointer inc/dec");
+ }
+
+ llvm::Value *NextVal;
+ if (const llvm::PointerType *PT =
+ dyn_cast<llvm::PointerType>(InVal->getType())) {
+ llvm::Constant *Inc = llvm::ConstantInt::get(CGF.Int32Ty, AmountVal);
+ if (!isa<llvm::FunctionType>(PT->getElementType())) {
+ QualType PTEE = ValTy->getPointeeType();
+ if (const ObjCObjectType *OIT = PTEE->getAs<ObjCObjectType>()) {
+ // Handle interface types, which are not represented with a concrete
+ // type.
+ int size = CGF.getContext().getTypeSize(OIT) / 8;
+ if (!isInc)
+ size = -size;
+ Inc = llvm::ConstantInt::get(Inc->getType(), size);
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+ InVal = Builder.CreateBitCast(InVal, i8Ty);
+ NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr");
+ llvm::Value *lhs = LV.getAddress();
+ lhs = Builder.CreateBitCast(lhs, llvm::PointerType::getUnqual(i8Ty));
+ LV = CGF.MakeAddrLValue(lhs, ValTy);
+ } else
+ NextVal = Builder.CreateInBoundsGEP(InVal, Inc, "ptrincdec");
+ } else {
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+ NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
+ NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
+ NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
+ }
+ } else if (InVal->getType()->isIntegerTy(1) && isInc) {
+ // Bool++ is an interesting case, due to promotion rules, we get:
+ // Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
+ // Bool = ((int)Bool+1) != 0
+ // An interesting aspect of this is that increment is always true.
+ // Decrement does not have this property.
+ NextVal = llvm::ConstantInt::getTrue(VMContext);
+ } else if (isa<llvm::IntegerType>(InVal->getType())) {
+ NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);
+
+ if (!ValTy->isSignedIntegerType())
+ // Unsigned integer inc is always two's complement.
+ NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ else {
+ switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ NextVal = Builder.CreateNSWAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ break;
+ case LangOptions::SOB_Defined:
+ NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ break;
+ case LangOptions::SOB_Trapping:
+ BinOpInfo BinOp;
+ BinOp.LHS = InVal;
+ BinOp.RHS = NextVal;
+ BinOp.Ty = E->getType();
+ BinOp.Opcode = BO_Add;
+ BinOp.E = E;
+ NextVal = EmitOverflowCheckedBinOp(BinOp);
+ break;
+ }
+ }
+ } else {
+ // Add the inc/dec to the real part.
+ if (InVal->getType()->isFloatTy())
+ NextVal =
+ llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<float>(AmountVal)));
+ else if (InVal->getType()->isDoubleTy())
+ NextVal =
+ llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<double>(AmountVal)));
+ else {
+ llvm::APFloat F(static_cast<float>(AmountVal));
+ bool ignored;
+ F.convert(CGF.Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
+ &ignored);
+ NextVal = llvm::ConstantFP::get(VMContext, F);
+ }
+ NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ }
+
+ // Store the updated result through the lvalue.
+ if (LV.isBitField())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy, &NextVal);
+ else
+ CGF.EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy);
+
+ // If this is a postinc, return the value read from memory, otherwise use the
+ // updated value.
+ return isPre ? NextVal : InVal;
+}
+
+
+
+Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ // Emit unary minus with EmitSub so we handle overflow cases etc.
+ BinOpInfo BinOp;
+ BinOp.RHS = Visit(E->getSubExpr());
+
+ if (BinOp.RHS->getType()->isFPOrFPVectorTy())
+ BinOp.LHS = llvm::ConstantFP::getZeroValueForNegation(BinOp.RHS->getType());
+ else
+ BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
+ BinOp.Ty = E->getType();
+ BinOp.Opcode = BO_Sub;
+ BinOp.E = E;
+ return EmitSub(BinOp);
+}
+
+Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ Value *Op = Visit(E->getSubExpr());
+ return Builder.CreateNot(Op, "neg");
+}
+
+Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
+ // Compare operand to zero.
+ Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
+
+ // Invert value.
+ // TODO: Could dynamically modify easy computations here. For example, if
+ // the operand is an icmp ne, turn into icmp eq.
+ BoolVal = Builder.CreateNot(BoolVal, "lnot");
+
+ // ZExt result to the expr type.
+ return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
+}
+
+Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
+ // Try folding the offsetof to a constant.
+ Expr::EvalResult EvalResult;
+ if (E->Evaluate(EvalResult, CGF.getContext()))
+ return llvm::ConstantInt::get(VMContext, EvalResult.Val.getInt());
+
+ // Loop over the components of the offsetof to compute the value.
+ unsigned n = E->getNumComponents();
+ const llvm::Type* ResultType = ConvertType(E->getType());
+ llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
+ QualType CurrentType = E->getTypeSourceInfo()->getType();
+ for (unsigned i = 0; i != n; ++i) {
+ OffsetOfExpr::OffsetOfNode ON = E->getComponent(i);
+ llvm::Value *Offset = 0;
+ switch (ON.getKind()) {
+ case OffsetOfExpr::OffsetOfNode::Array: {
+ // Compute the index
+ Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
+ llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
+ bool IdxSigned = IdxExpr->getType()->isSignedIntegerType();
+ Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
+
+ // Save the element type
+ CurrentType =
+ CGF.getContext().getAsArrayType(CurrentType)->getElementType();
+
+ // Compute the element size
+ llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
+ CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
+
+ // Multiply out to compute the result
+ Offset = Builder.CreateMul(Idx, ElemSize);
+ break;
+ }
+
+ case OffsetOfExpr::OffsetOfNode::Field: {
+ FieldDecl *MemberDecl = ON.getField();
+ RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
+
+ // Compute the index of the field in its parent.
+ unsigned i = 0;
+ // FIXME: It would be nice if we didn't have to loop here!
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end();
+ Field != FieldEnd; (void)++Field, ++i) {
+ if (*Field == MemberDecl)
+ break;
+ }
+ assert(i < RL.getFieldCount() && "offsetof field in wrong type");
+
+ // Compute the offset to the field
+ int64_t OffsetInt = RL.getFieldOffset(i) /
+ CGF.getContext().getCharWidth();
+ Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
+
+ // Save the element type.
+ CurrentType = MemberDecl->getType();
+ break;
+ }
+
+ case OffsetOfExpr::OffsetOfNode::Identifier:
+ llvm_unreachable("dependent __builtin_offsetof");
+
+ case OffsetOfExpr::OffsetOfNode::Base: {
+ if (ON.getBase()->isVirtual()) {
+ CGF.ErrorUnsupported(E, "virtual base in offsetof");
+ continue;
+ }
+
+ RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
+
+ // Save the element type.
+ CurrentType = ON.getBase()->getType();
+
+ // Compute the offset to the base.
+ const RecordType *BaseRT = CurrentType->getAs<RecordType>();
+ CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
+ int64_t OffsetInt = RL.getBaseClassOffset(BaseRD) /
+ CGF.getContext().getCharWidth();
+ Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
+ break;
+ }
+ }
+ Result = Builder.CreateAdd(Result, Offset);
+ }
+ return Result;
+}
+
+/// VisitSizeOfAlignOfExpr - Return the size or alignment of the type of the
+/// argument of the sizeof or alignof expression as an integer.
+Value *
+ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
+ QualType TypeToSize = E->getTypeOfArgument();
+ if (E->isSizeOf()) {
+ if (const VariableArrayType *VAT =
+ CGF.getContext().getAsVariableArrayType(TypeToSize)) {
+ if (E->isArgumentType()) {
+ // sizeof(type) - make sure to emit the VLA size.
+ CGF.EmitVLASize(TypeToSize);
+ } else {
+ // C99 6.5.3.4p2: If the argument is an expression of type
+ // VLA, it is evaluated.
+ CGF.EmitAnyExpr(E->getArgumentExpr());
+ }
+
+ return CGF.GetVLASize(VAT);
+ }
+ }
+
+ // If this isn't sizeof(vla), the result must be constant; use the constant
+ // folding logic so we don't have to duplicate it here.
+ Expr::EvalResult Result;
+ E->Evaluate(Result, CGF.getContext());
+ return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
+}
+
+Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
+ Expr *Op = E->getSubExpr();
+ if (Op->getType()->isAnyComplexType())
+ return CGF.EmitComplexExpr(Op, false, true, false, true).first;
+ return Visit(Op);
+}
+Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
+ Expr *Op = E->getSubExpr();
+ if (Op->getType()->isAnyComplexType())
+ return CGF.EmitComplexExpr(Op, true, false, true, false).second;
+
+ // __imag on a scalar returns zero. Emit the subexpr to ensure side
+ // effects are evaluated, but not the actual value.
+ if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid)
+ CGF.EmitLValue(Op);
+ else
+ CGF.EmitScalarExpr(Op, true);
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+}
+
+//===----------------------------------------------------------------------===//
+// Binary Operators
+//===----------------------------------------------------------------------===//
+
+BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ BinOpInfo Result;
+ Result.LHS = Visit(E->getLHS());
+ Result.RHS = Visit(E->getRHS());
+ Result.Ty = E->getType();
+ Result.Opcode = E->getOpcode();
+ Result.E = E;
+ return Result;
+}
+
+LValue ScalarExprEmitter::EmitCompoundAssignLValue(
+ const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
+ Value *&Result) {
+ QualType LHSTy = E->getLHS()->getType();
+ BinOpInfo OpInfo;
+
+ if (E->getComputationResultType()->isAnyComplexType()) {
+ // This needs to go through the complex expression emitter, but it's a tad
+ // complicated to do that... I'm leaving it out for now. (Note that we do
+ // actually need the imaginary part of the RHS for multiplication and
+ // division.)
+ CGF.ErrorUnsupported(E, "complex compound assignment");
+ Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+ return LValue();
+ }
+
+ // Emit the RHS first. __block variables need to have the rhs evaluated
+ // first, plus this should improve codegen a little.
+ OpInfo.RHS = Visit(E->getRHS());
+ OpInfo.Ty = E->getComputationResultType();
+ OpInfo.Opcode = E->getOpcode();
+ OpInfo.E = E;
+ // Load/convert the LHS.
+ LValue LHSLV = EmitCheckedLValue(E->getLHS());
+ OpInfo.LHS = EmitLoadOfLValue(LHSLV, LHSTy);
+ OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
+ E->getComputationLHSType());
+
+ // Expand the binary operator.
+ Result = (this->*Func)(OpInfo);
+
+ // Convert the result back to the LHS type.
+ Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);
+
+ // Store the result value into the LHS lvalue. Bit-fields are handled
+ // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
+ // 'An assignment expression has the value of the left operand after the
+ // assignment...'.
+ if (LHSLV.isBitField())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy,
+ &Result);
+ else
+ CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV, LHSTy);
+
+ return LHSLV;
+}
+
+Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
+ bool Ignore = TestAndClearIgnoreResultAssign();
+ Value *RHS;
+ LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
+
+ // If the result is clearly ignored, return now.
+ if (Ignore)
+ return 0;
+
+ // Objective-C property assignment never reloads the value following a store.
+ if (LHS.isPropertyRef() || LHS.isKVCRef())
+ return RHS;
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LHS.isVolatileQualified())
+ return RHS;
+
+ // Otherwise, reload the value.
+ return EmitLoadOfLValue(LHS, E->getType());
+}
+
+
+Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
+ if (Ops.LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
+ else if (Ops.Ty->hasUnsignedIntegerRepresentation())
+ return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
+ else
+ return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
+}
+
+Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
+ // Rem in C can't be a floating point type: C99 6.5.5p2.
+ if (Ops.Ty->isUnsignedIntegerType())
+ return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
+ else
+ return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
+}
+
+Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
+ unsigned IID;
+ unsigned OpID = 0;
+
+ switch (Ops.Opcode) {
+ case BO_Add:
+ case BO_AddAssign:
+ OpID = 1;
+ IID = llvm::Intrinsic::sadd_with_overflow;
+ break;
+ case BO_Sub:
+ case BO_SubAssign:
+ OpID = 2;
+ IID = llvm::Intrinsic::ssub_with_overflow;
+ break;
+ case BO_Mul:
+ case BO_MulAssign:
+ OpID = 3;
+ IID = llvm::Intrinsic::smul_with_overflow;
+ break;
+ default:
+ assert(false && "Unsupported operation for overflow detection");
+ IID = 0;
+ }
+ OpID <<= 1;
+ OpID |= 1;
+
+ const llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
+
+ llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, &opTy, 1);
+
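+  // The llvm.*.with.overflow intrinsics return a {result, i1 overflow} pair.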
+ Value *resultAndOverflow = Builder.CreateCall2(intrinsic, Ops.LHS, Ops.RHS);
+ Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
+ Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
+
+ // Branch in case of overflow.
+ llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
+ llvm::BasicBlock *continueBB = CGF.createBasicBlock("nooverflow", CGF.CurFn);
+
+ Builder.CreateCondBr(overflow, overflowBB, continueBB);
+
+ // Handle overflow with llvm.trap.
+ // TODO: it would be better to generate one of these blocks per function.
+ Builder.SetInsertPoint(overflowBB);
+ llvm::Function *Trap = CGF.CGM.getIntrinsic(llvm::Intrinsic::trap);
+ Builder.CreateCall(Trap);
+ Builder.CreateUnreachable();
+
+ // Continue on.
+ Builder.SetInsertPoint(continueBB);
+ return result;
+}
+
+Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
+ if (!Ops.Ty->isAnyPointerType()) {
+ if (Ops.Ty->hasSignedIntegerRepresentation()) {
+ switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWAdd(Ops.LHS, Ops.RHS, "add");
+ case LangOptions::SOB_Defined:
+ return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(Ops);
+ }
+ }
+
+ if (Ops.LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFAdd(Ops.LHS, Ops.RHS, "add");
+
+ return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
+ }
+
+ // Must have binary (not unary) expr here. Unary pointer decrement doesn't
+ // use this path.
+ const BinaryOperator *BinOp = cast<BinaryOperator>(Ops.E);
+
+ if (Ops.Ty->isPointerType() &&
+ Ops.Ty->getAs<PointerType>()->isVariableArrayType()) {
+ // The amount of the addition needs to account for the VLA size
+ CGF.ErrorUnsupported(BinOp, "VLA pointer addition");
+ }
+
+ Value *Ptr, *Idx;
+ Expr *IdxExp;
+ const PointerType *PT = BinOp->getLHS()->getType()->getAs<PointerType>();
+ const ObjCObjectPointerType *OPT =
+ BinOp->getLHS()->getType()->getAs<ObjCObjectPointerType>();
+ if (PT || OPT) {
+ Ptr = Ops.LHS;
+ Idx = Ops.RHS;
+ IdxExp = BinOp->getRHS();
+ } else { // int + pointer
+ PT = BinOp->getRHS()->getType()->getAs<PointerType>();
+ OPT = BinOp->getRHS()->getType()->getAs<ObjCObjectPointerType>();
+ assert((PT || OPT) && "Invalid add expr");
+ Ptr = Ops.RHS;
+ Idx = Ops.LHS;
+ IdxExp = BinOp->getLHS();
+ }
+
+ unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
+ if (Width < CGF.LLVMPointerWidth) {
+    // Zero- or sign-extend the index to pointer width based on whether its
+    // type is signed or not.
+ const llvm::Type *IdxType = CGF.IntPtrTy;
+ if (IdxExp->getType()->isSignedIntegerType())
+ Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
+ else
+ Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
+ }
+ const QualType ElementType = PT ? PT->getPointeeType() : OPT->getPointeeType();
+ // Handle interface types, which are not represented with a concrete type.
+ if (const ObjCObjectType *OIT = ElementType->getAs<ObjCObjectType>()) {
+ llvm::Value *InterfaceSize =
+ llvm::ConstantInt::get(Idx->getType(),
+ CGF.getContext().getTypeSizeInChars(OIT).getQuantity());
+ Idx = Builder.CreateMul(Idx, InterfaceSize);
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+ Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
+ Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
+ return Builder.CreateBitCast(Res, Ptr->getType());
+ }
+
+ // Explicitly handle GNU void* and function pointer arithmetic extensions. The
+ // GNU void* casts amount to no-ops since our void* type is i8*, but this is
+ // future proof.
+ if (ElementType->isVoidType() || ElementType->isFunctionType()) {
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+ Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
+ Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
+ return Builder.CreateBitCast(Res, Ptr->getType());
+ }
+
+ return Builder.CreateInBoundsGEP(Ptr, Idx, "add.ptr");
+}
+
+Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
+ if (!isa<llvm::PointerType>(Ops.LHS->getType())) {
+ if (Ops.Ty->hasSignedIntegerRepresentation()) {
+ switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWSub(Ops.LHS, Ops.RHS, "sub");
+ case LangOptions::SOB_Defined:
+ return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(Ops);
+ }
+ }
+
+ if (Ops.LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFSub(Ops.LHS, Ops.RHS, "sub");
+
+ return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
+ }
+
+ // Must have binary (not unary) expr here. Unary pointer increment doesn't
+ // use this path.
+ const BinaryOperator *BinOp = cast<BinaryOperator>(Ops.E);
+
+ if (BinOp->getLHS()->getType()->isPointerType() &&
+ BinOp->getLHS()->getType()->getAs<PointerType>()->isVariableArrayType()) {
+    // For ptr - int, the amount of the subtraction needs to account for the
+    // VLA size; for ptr - ptr, the amount of the division does.
+ CGF.ErrorUnsupported(BinOp, "VLA pointer subtraction");
+ }
+
+ const QualType LHSType = BinOp->getLHS()->getType();
+ const QualType LHSElementType = LHSType->getPointeeType();
+ if (!isa<llvm::PointerType>(Ops.RHS->getType())) {
+ // pointer - int
+ Value *Idx = Ops.RHS;
+ unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
+ if (Width < CGF.LLVMPointerWidth) {
+      // Zero- or sign-extend the index to pointer width based on whether its
+      // type is signed or not.
+ const llvm::Type *IdxType = CGF.IntPtrTy;
+ if (BinOp->getRHS()->getType()->isSignedIntegerType())
+ Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
+ else
+ Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
+ }
+ Idx = Builder.CreateNeg(Idx, "sub.ptr.neg");
+
+ // Handle interface types, which are not represented with a concrete type.
+ if (const ObjCObjectType *OIT = LHSElementType->getAs<ObjCObjectType>()) {
+ llvm::Value *InterfaceSize =
+ llvm::ConstantInt::get(Idx->getType(),
+ CGF.getContext().
+ getTypeSizeInChars(OIT).getQuantity());
+ Idx = Builder.CreateMul(Idx, InterfaceSize);
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+ Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
+ Value *Res = Builder.CreateGEP(LHSCasted, Idx, "add.ptr");
+ return Builder.CreateBitCast(Res, Ops.LHS->getType());
+ }
+
+ // Explicitly handle GNU void* and function pointer arithmetic
+ // extensions. The GNU void* casts amount to no-ops since our void* type is
+ // i8*, but this is future-proof.
+ if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+ Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
+ Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr");
+ return Builder.CreateBitCast(Res, Ops.LHS->getType());
+ }
+
+ return Builder.CreateInBoundsGEP(Ops.LHS, Idx, "sub.ptr");
+ } else {
+ // pointer - pointer
+ Value *LHS = Ops.LHS;
+ Value *RHS = Ops.RHS;
+
+ CharUnits ElementSize;
+
+ // Handle GCC extension for pointer arithmetic on void* and function pointer
+ // types.
+ if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
+ ElementSize = CharUnits::One();
+ } else {
+ ElementSize = CGF.getContext().getTypeSizeInChars(LHSElementType);
+ }
+
+ const llvm::Type *ResultType = ConvertType(Ops.Ty);
+ LHS = Builder.CreatePtrToInt(LHS, ResultType, "sub.ptr.lhs.cast");
+ RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
+ Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
+
+ // Optimize out the shift for element size of 1.
+ if (ElementSize.isOne())
+ return BytesBetween;
+
+ // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
+ // pointer difference in C is only defined in the case where both operands
+ // are pointing to elements of an array.
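+ // For example (illustrative), 'int *p, *q; p - q' with a 4-byte int becomes
+ // sub = ptrtoint(p) - ptrtoint(q); result = sdiv exact sub, 4. The 'exact'
+ // flag asserts that the byte difference is an even multiple of the element
+ // size.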
+ Value *BytesPerElt =
+ llvm::ConstantInt::get(ResultType, ElementSize.getQuantity());
+ return Builder.CreateExactSDiv(BytesBetween, BytesPerElt, "sub.ptr.div");
+ }
+}
+
+Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
+ // LLVM requires the LHS and RHS to be the same type: promote or truncate the
+ // RHS to the same size as the LHS.
+ Value *RHS = Ops.RHS;
+ if (Ops.LHS->getType() != RHS->getType())
+ RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
+
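+ // When CGF.CatchUndefined is set (the -fcatch-undefined-behavior checks),
+ // guard the shift: branch to the trap block unless the promoted shift amount
+ // is strictly less than the bit width of the LHS, since larger shifts are
+ // undefined.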
+ if (CGF.CatchUndefined
+ && isa<llvm::IntegerType>(Ops.LHS->getType())) {
+ unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth();
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
+ CGF.Builder.CreateCondBr(Builder.CreateICmpULT(RHS,
+ llvm::ConstantInt::get(RHS->getType(), Width)),
+ Cont, CGF.getTrapBB());
+ CGF.EmitBlock(Cont);
+ }
+
+ return Builder.CreateShl(Ops.LHS, RHS, "shl");
+}
+
+Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
+ // LLVM requires the LHS and RHS to be the same type: promote or truncate the
+ // RHS to the same size as the LHS.
+ Value *RHS = Ops.RHS;
+ if (Ops.LHS->getType() != RHS->getType())
+ RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
+
+ if (CGF.CatchUndefined
+ && isa<llvm::IntegerType>(Ops.LHS->getType())) {
+ unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth();
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
+ CGF.Builder.CreateCondBr(Builder.CreateICmpULT(RHS,
+ llvm::ConstantInt::get(RHS->getType(), Width)),
+ Cont, CGF.getTrapBB());
+ CGF.EmitBlock(Cont);
+ }
+
+ if (Ops.Ty->hasUnsignedIntegerRepresentation())
+ return Builder.CreateLShr(Ops.LHS, RHS, "shr");
+ return Builder.CreateAShr(Ops.LHS, RHS, "shr");
+}
+
+Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E, unsigned UICmpOpc,
+ unsigned SICmpOpc, unsigned FCmpOpc) {
+ TestAndClearIgnoreResultAssign();
+ Value *Result;
+ QualType LHSTy = E->getLHS()->getType();
+ if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
+ assert(E->getOpcode() == BO_EQ ||
+ E->getOpcode() == BO_NE);
+ Value *LHS = CGF.EmitScalarExpr(E->getLHS());
+ Value *RHS = CGF.EmitScalarExpr(E->getRHS());
+ Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
+ CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
+ } else if (!LHSTy->isAnyComplexType()) {
+ Value *LHS = Visit(E->getLHS());
+ Value *RHS = Visit(E->getRHS());
+
+ if (LHS->getType()->isFPOrFPVectorTy()) {
+ Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc,
+ LHS, RHS, "cmp");
+ } else if (LHSTy->hasSignedIntegerRepresentation()) {
+ Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)SICmpOpc,
+ LHS, RHS, "cmp");
+ } else {
+ // Unsigned integers and pointers.
+ Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+ LHS, RHS, "cmp");
+ }
+
+ // If this is a vector comparison, sign extend the result to the appropriate
+ // vector integer type and return it (don't convert to bool).
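+ // For example (illustrative), comparing two <4 x float> values yields a
+ // <4 x i1> from the fcmp; it is sign-extended to <4 x i32> so each element
+ // becomes all-ones or all-zeros, following the GCC/OpenCL vector-comparison
+ // convention.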
+ if (LHSTy->isVectorType())
+ return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
+
+ } else {
+ // Complex Comparison: can only be an equality comparison.
+ CodeGenFunction::ComplexPairTy LHS = CGF.EmitComplexExpr(E->getLHS());
+ CodeGenFunction::ComplexPairTy RHS = CGF.EmitComplexExpr(E->getRHS());
+
+ QualType CETy = LHSTy->getAs<ComplexType>()->getElementType();
+
+ Value *ResultR, *ResultI;
+ if (CETy->isRealFloatingType()) {
+ ResultR = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
+ LHS.first, RHS.first, "cmp.r");
+ ResultI = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
+ LHS.second, RHS.second, "cmp.i");
+ } else {
+ // Complex comparisons can only be equality comparisons. As such, signed
+ // and unsigned opcodes are the same.
+ ResultR = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+ LHS.first, RHS.first, "cmp.r");
+ ResultI = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+ LHS.second, RHS.second, "cmp.i");
+ }
+
+ if (E->getOpcode() == BO_EQ) {
+ Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
+ } else {
+ assert(E->getOpcode() == BO_NE &&
+ "Complex comparison other than == or != ?");
+ Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
+ }
+ }
+
+ return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
+}
+
+Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ bool Ignore = TestAndClearIgnoreResultAssign();
+
+ // __block variables need to have the rhs evaluated first, plus this should
+ // improve codegen just a little.
+ Value *RHS = Visit(E->getRHS());
+ LValue LHS = EmitCheckedLValue(E->getLHS());
+
+ // Store the value into the LHS. Bit-fields are handled specially
+ // because the result is altered by the store, i.e., [C99 6.5.16p1]
+ // 'An assignment expression has the value of the left operand after
+ // the assignment...'.
+ if (LHS.isBitField())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(),
+ &RHS);
+ else
+ CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS, E->getType());
+
+ // If the result is clearly ignored, return now.
+ if (Ignore)
+ return 0;
+
+ // Objective-C property assignment never reloads the value following a store.
+ if (LHS.isPropertyRef() || LHS.isKVCRef())
+ return RHS;
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LHS.isVolatileQualified())
+ return RHS;
+
+ // Otherwise, reload the value.
+ return EmitLoadOfLValue(LHS, E->getType());
+}
+
+Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
+ const llvm::Type *ResTy = ConvertType(E->getType());
+
+ // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
+ // If we have 1 && X, just emit X without inserting the control flow.
+ if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
+ if (Cond == 1) { // If we have 1 && X, just emit X.
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+ // ZExt result to int or bool.
+ return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
+ }
+
+ // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
+ if (!CGF.ContainsLabel(E->getRHS()))
+ return llvm::Constant::getNullValue(ResTy);
+ }
+
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
+
+ // Branch on the LHS first. If it is false, go to the failure (cont) block.
+ CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock);
+
+ // Any edges into the ContBlock at this point come from the first condition
+ // (there may be several). All of these values will be false. Start setting
+ // up the PHI node in the Cont Block for this.
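+ // The merged value is roughly (illustrative):
+ //   land.end: %r = phi i1 [ false, <LHS blocks> ], [ %rhscond, %land.rhs ]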
+ llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext),
+ "", ContBlock);
+ PN->reserveOperandSpace(2); // Normal case, two inputs.
+ for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
+ PI != PE; ++PI)
+ PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
+
+ CGF.BeginConditionalBranch();
+ CGF.EmitBlock(RHSBlock);
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+ CGF.EndConditionalBranch();
+
+ // Reacquire the RHS block, as there may be subblocks inserted.
+ RHSBlock = Builder.GetInsertBlock();
+
+ // Emit an unconditional branch from this block to ContBlock. Insert an entry
+ // into the phi node for the edge with the value of RHSCond.
+ CGF.EmitBlock(ContBlock);
+ PN->addIncoming(RHSCond, RHSBlock);
+
+ // ZExt result to int.
+ return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
+}
+
+Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
+ const llvm::Type *ResTy = ConvertType(E->getType());
+
+ // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
+ // If we have 0 || X, just emit X without inserting the control flow.
+ if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
+ if (Cond == -1) { // If we have 0 || X, just emit X.
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+ // ZExt result to int or bool.
+ return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
+ }
+
+ // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
+ if (!CGF.ContainsLabel(E->getRHS()))
+ return llvm::ConstantInt::get(ResTy, 1);
+ }
+
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
+
+ // Branch on the LHS first. If it is true, go to the success (cont) block.
+ CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock);
+
+ // Any edges into the ContBlock at this point come from the first condition
+ // (there may be several). All of these values will be true. Start setting
+ // up the PHI node in the Cont Block for this.
+ llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext),
+ "", ContBlock);
+ PN->reserveOperandSpace(2); // Normal case, two inputs.
+ for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
+ PI != PE; ++PI)
+ PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
+
+ CGF.BeginConditionalBranch();
+
+ // Emit the RHS condition as a bool value.
+ CGF.EmitBlock(RHSBlock);
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+
+ CGF.EndConditionalBranch();
+
+ // Reacquire the RHS block, as there may be subblocks inserted.
+ RHSBlock = Builder.GetInsertBlock();
+
+ // Emit an unconditional branch from this block to ContBlock. Insert an entry
+ // into the phi node for the edge with the value of RHSCond.
+ CGF.EmitBlock(ContBlock);
+ PN->addIncoming(RHSCond, RHSBlock);
+
+ // ZExt result to int.
+ return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
+}
+
+Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
+ CGF.EmitStmt(E->getLHS());
+ CGF.EnsureInsertPoint();
+ return Visit(E->getRHS());
+}
+
+//===----------------------------------------------------------------------===//
+// Other Operators
+//===----------------------------------------------------------------------===//
+
+/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
+/// expression is cheap enough and side-effect-free enough to evaluate
+/// unconditionally instead of conditionally. This is used to convert control
+/// flow into selects in some cases.
+static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
+ CodeGenFunction &CGF) {
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
+ return isCheapEnoughToEvaluateUnconditionally(PE->getSubExpr(), CGF);
+
+ // TODO: Allow anything we can constant fold to an integer or fp constant.
+ if (isa<IntegerLiteral>(E) || isa<CharacterLiteral>(E) ||
+ isa<FloatingLiteral>(E))
+ return true;
+
+ // Non-volatile automatic variables too, to get "cond ? X : Y" where
+ // X and Y are local variables.
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (VD->hasLocalStorage() && !(CGF.getContext()
+ .getCanonicalType(VD->getType())
+ .isVolatileQualified()))
+ return true;
+
+ return false;
+}
+
+
+Value *ScalarExprEmitter::
+VisitConditionalOperator(const ConditionalOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ // If the condition constant folds and can be elided, try to avoid emitting
+ // the condition and the dead arm.
+ if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getCond())){
+ Expr *Live = E->getLHS(), *Dead = E->getRHS();
+ if (Cond == -1)
+ std::swap(Live, Dead);
+
+ // If the dead side doesn't have labels we need, and if the Live side isn't
+ // the GNU missing ?: extension (which we could handle, but don't bother
+ // to), just emit the Live part.
+ if ((!Dead || !CGF.ContainsLabel(Dead)) && // No labels in dead part
+ Live) // Live part isn't missing.
+ return Visit(Live);
+ }
+
+
+ // If this is a really simple expression (like x ? 4 : 5), emit this as a
+ // select instead of as control flow. We can only do this if it is cheap and
+ // safe to evaluate the LHS and RHS unconditionally.
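+ // For example (illustrative), 'x ? 4 : 5' becomes
+ //   %cond = select i1 %tobool, i32 4, i32 5
+ // with no branches at all.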
+ if (E->getLHS() && isCheapEnoughToEvaluateUnconditionally(E->getLHS(),
+ CGF) &&
+ isCheapEnoughToEvaluateUnconditionally(E->getRHS(), CGF)) {
+ llvm::Value *CondV = CGF.EvaluateExprAsBool(E->getCond());
+ llvm::Value *LHS = Visit(E->getLHS());
+ llvm::Value *RHS = Visit(E->getRHS());
+ return Builder.CreateSelect(CondV, LHS, RHS, "cond");
+ }
+
+ if (!E->getLHS() && CGF.getContext().getLangOptions().CPlusPlus) {
+ // Does not support GNU missing condition extension in C++ yet (see #7726)
+ CGF.ErrorUnsupported(E, "conditional operator with missing LHS");
+ return llvm::UndefValue::get(ConvertType(E->getType()));
+ }
+
+ llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+ Value *CondVal = 0;
+
+ // If we don't have the GNU missing condition extension, emit a branch on bool
+ // the normal way.
+ if (E->getLHS()) {
+ // Just use EmitBranchOnBoolExpr to get small and simple code for the branch
+ // on bool.
+ CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+ } else {
+ // Otherwise, for the ?: extension, evaluate the conditional and then
+ // convert it to bool the hard way. We do this explicitly because we need
+ // the unconverted value for the missing middle value of the ?:.
+ CondVal = CGF.EmitScalarExpr(E->getCond());
+
+ // In some cases, EmitScalarConversion will delete the "CondVal" expression
+ // if there are no extra uses (an optimization). Inhibit this by making an
+ // extra dead use, because we're going to add a use of CondVal later. We
+ // don't use the builder for this, because we don't want it to get optimized
+ // away. This leaves dead code, but the ?: extension isn't common.
+ new llvm::BitCastInst(CondVal, CondVal->getType(), "dummy?:holder",
+ Builder.GetInsertBlock());
+
+ Value *CondBoolVal =
+ CGF.EmitScalarConversion(CondVal, E->getCond()->getType(),
+ CGF.getContext().BoolTy);
+ Builder.CreateCondBr(CondBoolVal, LHSBlock, RHSBlock);
+ }
+
+ CGF.BeginConditionalBranch();
+ CGF.EmitBlock(LHSBlock);
+
+ // Handle the GNU extension for missing LHS.
+ Value *LHS;
+ if (E->getLHS())
+ LHS = Visit(E->getLHS());
+ else // Perform promotions, to handle cases like "short ?: int"
+ LHS = EmitScalarConversion(CondVal, E->getCond()->getType(), E->getType());
+
+ CGF.EndConditionalBranch();
+ LHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBranch(ContBlock);
+
+ CGF.BeginConditionalBranch();
+ CGF.EmitBlock(RHSBlock);
+
+ Value *RHS = Visit(E->getRHS());
+ CGF.EndConditionalBranch();
+ RHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(ContBlock);
+
+ // If the LHS or RHS is a throw expression, it will be legitimately null.
+ if (!LHS)
+ return RHS;
+ if (!RHS)
+ return LHS;
+
+ // Create a PHI node for the result.
+ llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), "cond");
+ PN->reserveOperandSpace(2);
+ PN->addIncoming(LHS, LHSBlock);
+ PN->addIncoming(RHS, RHSBlock);
+ return PN;
+}
+
+Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
+ return Visit(E->getChosenSubExpr(CGF.getContext()));
+}
+
+Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
+ llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
+ llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
+
+ // If EmitVAArg fails, we fall back to the LLVM instruction.
+ if (!ArgPtr)
+ return Builder.CreateVAArg(ArgValue, ConvertType(VE->getType()));
+
+ // FIXME Volatility.
+ return Builder.CreateLoad(ArgPtr);
+}
+
+Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *BE) {
+ return CGF.BuildBlockLiteralTmp(BE);
+}
+
+//===----------------------------------------------------------------------===//
+// Entry Point into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitScalarExpr - Emit the computation of the specified expression of scalar
+/// type and return the resulting LLVM value.
+Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
+ assert(E && !hasAggregateLLVMType(E->getType()) &&
+ "Invalid scalar expression to emit");
+
+ return ScalarExprEmitter(*this, IgnoreResultAssign)
+ .Visit(const_cast<Expr*>(E));
+}
+
+/// EmitScalarConversion - Emit a conversion from the specified type to the
+/// specified destination type, both of which are LLVM scalar types.
+Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
+ QualType DstTy) {
+ assert(!hasAggregateLLVMType(SrcTy) && !hasAggregateLLVMType(DstTy) &&
+ "Invalid scalar expression to emit");
+ return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy);
+}
+
+/// EmitComplexToScalarConversion - Emit a conversion from the specified complex
+/// type to the specified destination type, where the destination type is an
+/// LLVM scalar type.
+Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
+ QualType SrcTy,
+ QualType DstTy) {
+ assert(SrcTy->isAnyComplexType() && !hasAggregateLLVMType(DstTy) &&
+ "Invalid complex -> scalar conversion");
+ return ScalarExprEmitter(*this).EmitComplexToScalarConversion(Src, SrcTy,
+ DstTy);
+}
+
+
+llvm::Value *CodeGenFunction::
+EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+ return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
+}
+
+LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
+ llvm::Value *V;
+ // object->isa or (*object).isa
+ // Generate code as for: *(Class*)object
+ // build Class* type
+ const llvm::Type *ClassPtrTy = ConvertType(E->getType());
+
+ Expr *BaseExpr = E->getBase();
+ if (BaseExpr->isLvalue(getContext()) != Expr::LV_Valid) {
+ V = CreateTempAlloca(ClassPtrTy, "resval");
+ llvm::Value *Src = EmitScalarExpr(BaseExpr);
+ Builder.CreateStore(Src, V);
+ V = ScalarExprEmitter(*this).EmitLoadOfLValue(
+ MakeAddrLValue(V, E->getType()), E->getType());
+ } else {
+ if (E->isArrow())
+ V = ScalarExprEmitter(*this).EmitLoadOfLValue(BaseExpr);
+ else
+ V = EmitLValue(BaseExpr).getAddress();
+ }
+
+ // build Class* type
+ ClassPtrTy = ClassPtrTy->getPointerTo();
+ V = Builder.CreateBitCast(V, ClassPtrTy);
+ return MakeAddrLValue(V, E->getType());
+}
+
+
+LValue CodeGenFunction::EmitCompoundAssignOperatorLValue(
+ const CompoundAssignOperator *E) {
+ ScalarExprEmitter Scalar(*this);
+ Value *Result = 0;
+ switch (E->getOpcode()) {
+#define COMPOUND_OP(Op) \
+ case BO_##Op##Assign: \
+ return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
+ Result)
+ COMPOUND_OP(Mul);
+ COMPOUND_OP(Div);
+ COMPOUND_OP(Rem);
+ COMPOUND_OP(Add);
+ COMPOUND_OP(Sub);
+ COMPOUND_OP(Shl);
+ COMPOUND_OP(Shr);
+ COMPOUND_OP(And);
+ COMPOUND_OP(Xor);
+ COMPOUND_OP(Or);
+#undef COMPOUND_OP
+
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ case BO_Mul:
+ case BO_Div:
+ case BO_Rem:
+ case BO_Add:
+ case BO_Sub:
+ case BO_Shl:
+ case BO_Shr:
+ case BO_LT:
+ case BO_GT:
+ case BO_LE:
+ case BO_GE:
+ case BO_EQ:
+ case BO_NE:
+ case BO_And:
+ case BO_Xor:
+ case BO_Or:
+ case BO_LAnd:
+ case BO_LOr:
+ case BO_Assign:
+ case BO_Comma:
+ assert(false && "Not a valid compound assignment operator");
+ break;
+ }
+
+ llvm_unreachable("Unhandled compound assignment operator");
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
new file mode 100644
index 0000000..6a6d63d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
@@ -0,0 +1,850 @@
+//===----- CGObjC.cpp - Emit LLVM Code for Objective-C -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Objective-C code as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+/// Emits an instance of NSConstantString representing the object.
+llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
+{
+ llvm::Constant *C =
+ CGM.getObjCRuntime().GenerateConstantString(E->getString());
+ // FIXME: This bitcast should just be made an invariant on the Runtime.
+ return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
+}
+
+/// Emit a selector.
+llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
+ // Untyped selector.
+ // Note that this implementation allows for non-constant strings to be passed
+ // as arguments to @selector(). Currently, the only thing preventing this
+ // behaviour is the type checking in the front end.
+ return CGM.getObjCRuntime().GetSelector(Builder, E->getSelector());
+}
+
+llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
+ // FIXME: This should pass the Decl not the name.
+ return CGM.getObjCRuntime().GenerateProtocolRef(Builder, E->getProtocol());
+}
+
+
+RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
+ ReturnValueSlot Return) {
+ // Only the lookup mechanism and first two arguments of the method
+ // implementation vary between runtimes. We can get the receiver and
+ // arguments in generic code.
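+ // (Illustrative note: which dispatch function is ultimately called -- e.g.
+ // objc_msgSend for the Apple runtimes or objc_msg_lookup for the GNU
+ // runtime -- is decided by the CGObjCRuntime implementation invoked below.)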
+
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ bool isSuperMessage = false;
+ bool isClassMessage = false;
+ ObjCInterfaceDecl *OID = 0;
+ // Find the receiver
+ llvm::Value *Receiver = 0;
+ switch (E->getReceiverKind()) {
+ case ObjCMessageExpr::Instance:
+ Receiver = EmitScalarExpr(E->getInstanceReceiver());
+ break;
+
+ case ObjCMessageExpr::Class: {
+ const ObjCObjectType *ObjTy
+ = E->getClassReceiver()->getAs<ObjCObjectType>();
+ assert(ObjTy && "Invalid Objective-C class message send");
+ OID = ObjTy->getInterface();
+ assert(OID && "Invalid Objective-C class message send");
+ Receiver = Runtime.GetClass(Builder, OID);
+ isClassMessage = true;
+ break;
+ }
+
+ case ObjCMessageExpr::SuperInstance:
+ Receiver = LoadObjCSelf();
+ isSuperMessage = true;
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ Receiver = LoadObjCSelf();
+ isSuperMessage = true;
+ isClassMessage = true;
+ break;
+ }
+
+ CallArgList Args;
+ EmitCallArgs(Args, E->getMethodDecl(), E->arg_begin(), E->arg_end());
+
+ QualType ResultType =
+ E->getMethodDecl() ? E->getMethodDecl()->getResultType() : E->getType();
+
+ if (isSuperMessage) {
+ // super is only valid in an Objective-C method
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ return Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
+ E->getSelector(),
+ OMD->getClassInterface(),
+ isCategoryImpl,
+ Receiver,
+ isClassMessage,
+ Args,
+ E->getMethodDecl());
+ }
+
+ return Runtime.GenerateMessageSend(*this, Return, ResultType,
+ E->getSelector(),
+ Receiver, Args, OID,
+ E->getMethodDecl());
+}
+
+/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
+/// the LLVM function and sets the other context used by
+/// CodeGenFunction.
+void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) {
+ FunctionArgList Args;
+ // Check if we should generate debug info for this method.
+ if (CGM.getDebugInfo() && !OMD->hasAttr<NoDebugAttr>())
+ DebugInfo = CGM.getDebugInfo();
+
+ llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);
+
+ const CGFunctionInfo &FI = CGM.getTypes().getFunctionInfo(OMD);
+ CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
+
+ Args.push_back(std::make_pair(OMD->getSelfDecl(),
+ OMD->getSelfDecl()->getType()));
+ Args.push_back(std::make_pair(OMD->getCmdDecl(),
+ OMD->getCmdDecl()->getType()));
+
+ for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
+ E = OMD->param_end(); PI != E; ++PI)
+ Args.push_back(std::make_pair(*PI, (*PI)->getType()));
+
+ StartFunction(OMD, OMD->getResultType(), Fn, Args, OMD->getLocStart());
+}
+
+/// Generate an Objective-C method. An Objective-C method is a C function with
+/// its pointer, name, and types registered in the class structure.
+void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
+ StartObjCMethod(OMD, OMD->getClassInterface());
+ EmitStmt(OMD->getBody());
+ FinishFunction(OMD->getBodyRBrace());
+}
+
+// FIXME: I wasn't sure about the synthesis approach. If we end up generating an
+// AST for the whole body we can just fall back to having a GenerateFunction
+// which takes the body Stmt.
+
+/// GenerateObjCGetter - Generate an Objective-C property getter
+/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
+/// is illegal within a category.
+void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID) {
+ ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ bool IsAtomic =
+ !(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic);
+ ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
+ assert(OMD && "Invalid call to generate getter (empty method)");
+ StartObjCMethod(OMD, IMP->getClassInterface());
+
+ // Determine if we should use an objc_getProperty call for
+ // this. Non-atomic properties are directly evaluated.
+ // Atomic 'copy' and 'retain' properties are also directly
+ // evaluated in gc-only mode.
+ if (CGM.getLangOptions().getGCMode() != LangOptions::GCOnly &&
+ IsAtomic &&
+ (PD->getSetterKind() == ObjCPropertyDecl::Copy ||
+ PD->getSetterKind() == ObjCPropertyDecl::Retain)) {
+ llvm::Value *GetPropertyFn =
+ CGM.getObjCRuntime().GetPropertyGetFunction();
+
+ if (!GetPropertyFn) {
+ CGM.ErrorUnsupported(PID, "Obj-C getter requiring atomic copy");
+ FinishFunction();
+ return;
+ }
+
+ // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
+ // FIXME: Can't this be simpler? This might even be worse than the
+ // corresponding gcc code.
+ CodeGenTypes &Types = CGM.getTypes();
+ ValueDecl *Cmd = OMD->getCmdDecl();
+ llvm::Value *CmdVal = Builder.CreateLoad(LocalDeclMap[Cmd], "cmd");
+ QualType IdTy = getContext().getObjCIdType();
+ llvm::Value *SelfAsId =
+ Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
+ llvm::Value *Offset = EmitIvarOffset(IMP->getClassInterface(), Ivar);
+ llvm::Value *True =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(SelfAsId), IdTy));
+ Args.push_back(std::make_pair(RValue::get(CmdVal), Cmd->getType()));
+ Args.push_back(std::make_pair(RValue::get(Offset), getContext().LongTy));
+ Args.push_back(std::make_pair(RValue::get(True), getContext().BoolTy));
+ // FIXME: We shouldn't need to get the function info here, the
+ // runtime already should have computed it to build the function.
+ RValue RV = EmitCall(Types.getFunctionInfo(PD->getType(), Args,
+ FunctionType::ExtInfo()),
+ GetPropertyFn, ReturnValueSlot(), Args);
+ // We need to fix the type here. Ivars with copy & retain are
+ // always objects so we don't need to worry about complex or
+ // aggregates.
+ RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
+ Types.ConvertType(PD->getType())));
+ EmitReturnOfRValue(RV, PD->getType());
+ } else {
+ if (Ivar->getType()->isAnyComplexType()) {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
+ Ivar, 0);
+ ComplexPairTy Pair = LoadComplexFromAddr(LV.getAddress(),
+ LV.isVolatileQualified());
+ StoreComplexToAddr(Pair, ReturnValue, LV.isVolatileQualified());
+ }
+ else if (hasAggregateLLVMType(Ivar->getType())) {
+ bool IsStrong = false;
+ if ((IsAtomic || (IsStrong = IvarTypeWithAggrGCObjects(Ivar->getType())))
+ && CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect
+ && CGM.getObjCRuntime().GetCopyStructFunction()) {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
+ Ivar, 0);
+ llvm::Value *GetCopyStructFn =
+ CGM.getObjCRuntime().GetCopyStructFunction();
+ CodeGenTypes &Types = CGM.getTypes();
+ // objc_copyStruct (ReturnValue, &structIvar,
+ // sizeof (Type of Ivar), isAtomic, false);
+ CallArgList Args;
+ RValue RV = RValue::get(Builder.CreateBitCast(ReturnValue,
+ Types.ConvertType(getContext().VoidPtrTy)));
+ Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ RV = RValue::get(Builder.CreateBitCast(LV.getAddress(),
+ Types.ConvertType(getContext().VoidPtrTy)));
+ Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ // sizeof (Type of Ivar)
+ uint64_t Size = getContext().getTypeSize(Ivar->getType()) / 8;
+ llvm::Value *SizeVal =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().LongTy), Size);
+ Args.push_back(std::make_pair(RValue::get(SizeVal),
+ getContext().LongTy));
+ llvm::Value *isAtomic =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy),
+ IsAtomic ? 1 : 0);
+ Args.push_back(std::make_pair(RValue::get(isAtomic),
+ getContext().BoolTy));
+ llvm::Value *hasStrong =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy),
+ IsStrong ? 1 : 0);
+ Args.push_back(std::make_pair(RValue::get(hasStrong),
+ getContext().BoolTy));
+ EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
+ FunctionType::ExtInfo()),
+ GetCopyStructFn, ReturnValueSlot(), Args);
+ }
+ else {
+ if (PID->getGetterCXXConstructor()) {
+ ReturnStmt *Stmt =
+ new (getContext()) ReturnStmt(SourceLocation(),
+ PID->getGetterCXXConstructor(),
+ 0);
+ EmitReturnStmt(*Stmt);
+ }
+ else {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
+ Ivar, 0);
+ EmitAggregateCopy(ReturnValue, LV.getAddress(), Ivar->getType());
+ }
+ }
+ } else {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
+ Ivar, 0);
+ CodeGenTypes &Types = CGM.getTypes();
+ RValue RV = EmitLoadOfLValue(LV, Ivar->getType());
+ RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
+ Types.ConvertType(PD->getType())));
+ EmitReturnOfRValue(RV, PD->getType());
+ }
+ }
+
+ FinishFunction();
+}
+
+/// GenerateObjCSetter - Generate an Objective-C property setter
+/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
+/// is illegal within a category.
+void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID) {
+ ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
+ assert(OMD && "Invalid call to generate setter (empty method)");
+ StartObjCMethod(OMD, IMP->getClassInterface());
+
+ bool IsCopy = PD->getSetterKind() == ObjCPropertyDecl::Copy;
+ bool IsAtomic =
+ !(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic);
+
+ // Determine if we should use an objc_setProperty call for
+ // this. Properties with 'copy' semantics always use it, as do
+ // non-atomic properties with 'release' semantics as long as we are
+ // not in gc-only mode.
+ if (IsCopy ||
+ (CGM.getLangOptions().getGCMode() != LangOptions::GCOnly &&
+ PD->getSetterKind() == ObjCPropertyDecl::Retain)) {
+ llvm::Value *SetPropertyFn =
+ CGM.getObjCRuntime().GetPropertySetFunction();
+
+ if (!SetPropertyFn) {
+ CGM.ErrorUnsupported(PID, "Obj-C setter requiring atomic copy");
+ FinishFunction();
+ return;
+ }
+
+ // Emit objc_setProperty((id) self, _cmd, offset, arg,
+ // <is-atomic>, <is-copy>).
+ // FIXME: Can't this be simpler? This might even be worse than the
+ // corresponding gcc code.
+ CodeGenTypes &Types = CGM.getTypes();
+ ValueDecl *Cmd = OMD->getCmdDecl();
+ llvm::Value *CmdVal = Builder.CreateLoad(LocalDeclMap[Cmd], "cmd");
+ QualType IdTy = getContext().getObjCIdType();
+ llvm::Value *SelfAsId =
+ Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
+ llvm::Value *Offset = EmitIvarOffset(IMP->getClassInterface(), Ivar);
+ llvm::Value *Arg = LocalDeclMap[*OMD->param_begin()];
+ llvm::Value *ArgAsId =
+ Builder.CreateBitCast(Builder.CreateLoad(Arg, "arg"),
+ Types.ConvertType(IdTy));
+ llvm::Value *True =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
+ llvm::Value *False =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 0);
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(SelfAsId), IdTy));
+ Args.push_back(std::make_pair(RValue::get(CmdVal), Cmd->getType()));
+ Args.push_back(std::make_pair(RValue::get(Offset), getContext().LongTy));
+ Args.push_back(std::make_pair(RValue::get(ArgAsId), IdTy));
+ Args.push_back(std::make_pair(RValue::get(IsAtomic ? True : False),
+ getContext().BoolTy));
+ Args.push_back(std::make_pair(RValue::get(IsCopy ? True : False),
+ getContext().BoolTy));
+ // FIXME: We shouldn't need to get the function info here, the runtime
+ // already should have computed it to build the function.
+ EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
+ FunctionType::ExtInfo()),
+ SetPropertyFn,
+ ReturnValueSlot(), Args);
+ } else if (IsAtomic && hasAggregateLLVMType(Ivar->getType()) &&
+ !Ivar->getType()->isAnyComplexType() &&
+ IndirectObjCSetterArg(*CurFnInfo)
+ && CGM.getObjCRuntime().GetCopyStructFunction()) {
+ // objc_copyStruct (&structIvar, &Arg,
+ // sizeof (struct something), true, false);
+ llvm::Value *GetCopyStructFn =
+ CGM.getObjCRuntime().GetCopyStructFunction();
+ CodeGenTypes &Types = CGM.getTypes();
+ CallArgList Args;
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), Ivar, 0);
+ RValue RV = RValue::get(Builder.CreateBitCast(LV.getAddress(),
+ Types.ConvertType(getContext().VoidPtrTy)));
+ Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ llvm::Value *Arg = LocalDeclMap[*OMD->param_begin()];
+ llvm::Value *ArgAsPtrTy =
+ Builder.CreateBitCast(Arg,
+ Types.ConvertType(getContext().VoidPtrTy));
+ RV = RValue::get(ArgAsPtrTy);
+ Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ // sizeof (Type of Ivar)
+ uint64_t Size = getContext().getTypeSize(Ivar->getType()) / 8;
+ llvm::Value *SizeVal =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().LongTy), Size);
+ Args.push_back(std::make_pair(RValue::get(SizeVal),
+ getContext().LongTy));
+ llvm::Value *True =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
+ Args.push_back(std::make_pair(RValue::get(True), getContext().BoolTy));
+ llvm::Value *False =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 0);
+ Args.push_back(std::make_pair(RValue::get(False), getContext().BoolTy));
+ EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
+ FunctionType::ExtInfo()),
+ GetCopyStructFn, ReturnValueSlot(), Args);
+ } else if (PID->getSetterCXXAssignment()) {
+ EmitAnyExpr(PID->getSetterCXXAssignment(), (llvm::Value *)0, false, true,
+ false);
+
+ } else {
+ // FIXME: Find a clean way to avoid AST node creation.
+ SourceLocation Loc = PD->getLocation();
+ ValueDecl *Self = OMD->getSelfDecl();
+ ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
+ DeclRefExpr Base(Self, Self->getType(), Loc);
+ ParmVarDecl *ArgDecl = *OMD->param_begin();
+ DeclRefExpr Arg(ArgDecl, ArgDecl->getType(), Loc);
+ ObjCIvarRefExpr IvarRef(Ivar, Ivar->getType(), Loc, &Base, true, true);
+
+ // The property type can differ from the ivar type in some situations with
+ // Objective-C pointer types; in these cases we can always bit-cast the RHS.
+ if (getContext().getCanonicalType(Ivar->getType()) !=
+ getContext().getCanonicalType(ArgDecl->getType())) {
+ ImplicitCastExpr ArgCasted(ImplicitCastExpr::OnStack,
+ Ivar->getType(), CK_BitCast, &Arg,
+ VK_RValue);
+ BinaryOperator Assign(&IvarRef, &ArgCasted, BO_Assign,
+ Ivar->getType(), Loc);
+ EmitStmt(&Assign);
+ } else {
+ BinaryOperator Assign(&IvarRef, &Arg, BO_Assign,
+ Ivar->getType(), Loc);
+ EmitStmt(&Assign);
+ }
+ }
+
+ FinishFunction();
+}
+
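+/// GenerateObjCCtorDtorMethod - Emit a method body that runs the C++
+/// constructors (ctor == true) or destructors (ctor == false) for an
+/// implementation's ivar initializers; this is what backs the synthesized
+/// .cxx_construct and .cxx_destruct methods.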
+void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
+ ObjCMethodDecl *MD,
+ bool ctor) {
+ llvm::SmallVector<CXXBaseOrMemberInitializer *, 8> IvarInitializers;
+ MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
+ StartObjCMethod(MD, IMP->getClassInterface());
+ for (ObjCImplementationDecl::init_const_iterator B = IMP->init_begin(),
+ E = IMP->init_end(); B != E; ++B) {
+ CXXBaseOrMemberInitializer *Member = (*B);
+ IvarInitializers.push_back(Member);
+ }
+ if (ctor) {
+ for (unsigned I = 0, E = IvarInitializers.size(); I != E; ++I) {
+ CXXBaseOrMemberInitializer *IvarInit = IvarInitializers[I];
+ FieldDecl *Field = IvarInit->getMember();
+ QualType FieldType = Field->getType();
+ ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
+ LoadObjCSelf(), Ivar, 0);
+ EmitAggExpr(IvarInit->getInit(), LV.getAddress(),
+ LV.isVolatileQualified(), false, true);
+ }
+ // constructor returns 'self'.
+ CodeGenTypes &Types = CGM.getTypes();
+ QualType IdTy(CGM.getContext().getObjCIdType());
+ llvm::Value *SelfAsId =
+ Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
+ EmitReturnOfRValue(RValue::get(SelfAsId), IdTy);
+ } else {
+ // dtor
+ for (size_t i = IvarInitializers.size(); i > 0; --i) {
+ FieldDecl *Field = IvarInitializers[i - 1]->getMember();
+ QualType FieldType = Field->getType();
+ const ConstantArrayType *Array =
+ getContext().getAsConstantArrayType(FieldType);
+ if (Array)
+ FieldType = getContext().getBaseElementType(FieldType);
+
+ ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
+ LoadObjCSelf(), Ivar, 0);
+ const RecordType *RT = FieldType->getAs<RecordType>();
+ CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ CXXDestructorDecl *Dtor = FieldClassDecl->getDestructor();
+ if (!Dtor->isTrivial()) {
+ if (Array) {
+ const llvm::Type *BasePtr = ConvertType(FieldType);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr =
+ Builder.CreateBitCast(LV.getAddress(), BasePtr);
+ EmitCXXAggrDestructorCall(Dtor,
+ Array, BaseAddrPtr);
+ } else {
+ EmitCXXDestructorCall(Dtor,
+ Dtor_Complete, /*ForVirtualBase=*/false,
+ LV.getAddress());
+ }
+ }
+ }
+ }
+ FinishFunction();
+}
+
+bool CodeGenFunction::IndirectObjCSetterArg(const CGFunctionInfo &FI) {
+ CGFunctionInfo::const_arg_iterator it = FI.arg_begin();
+ it++; it++;
+ const ABIArgInfo &AI = it->info;
+ // FIXME: Is this a sufficient check?
+ return (AI.getKind() == ABIArgInfo::Indirect);
+}
+
+bool CodeGenFunction::IvarTypeWithAggrGCObjects(QualType Ty) {
+ if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC)
+ return false;
+ if (const RecordType *FDTTy = Ty.getTypePtr()->getAs<RecordType>())
+ return FDTTy->getDecl()->hasObjectMember();
+ return false;
+}
+
+llvm::Value *CodeGenFunction::LoadObjCSelf() {
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ return Builder.CreateLoad(LocalDeclMap[OMD->getSelfDecl()], "self");
+}
+
+QualType CodeGenFunction::TypeOfSelfObject() {
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ ImplicitParamDecl *selfDecl = OMD->getSelfDecl();
+ const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>(
+ getContext().getCanonicalType(selfDecl->getType()));
+ return PTy->getPointeeType();
+}
+
+RValue CodeGenFunction::EmitObjCSuperPropertyGet(const Expr *Exp,
+ const Selector &S,
+ ReturnValueSlot Return) {
+ llvm::Value *Receiver = LoadObjCSelf();
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ bool isClassMessage = OMD->isClassMethod();
+ bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ return CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
+ Return,
+ Exp->getType(),
+ S,
+ OMD->getClassInterface(),
+ isCategoryImpl,
+ Receiver,
+ isClassMessage,
+ CallArgList());
+
+}
+
+RValue CodeGenFunction::EmitObjCPropertyGet(const Expr *Exp,
+ ReturnValueSlot Return) {
+ Exp = Exp->IgnoreParens();
+ // FIXME: Split it into two separate routines.
+ if (const ObjCPropertyRefExpr *E = dyn_cast<ObjCPropertyRefExpr>(Exp)) {
+ Selector S = E->getProperty()->getGetterName();
+ if (isa<ObjCSuperExpr>(E->getBase()))
+ return EmitObjCSuperPropertyGet(E, S, Return);
+ return CGM.getObjCRuntime().
+ GenerateMessageSend(*this, Return, Exp->getType(), S,
+ EmitScalarExpr(E->getBase()),
+ CallArgList());
+ } else {
+ const ObjCImplicitSetterGetterRefExpr *KE =
+ cast<ObjCImplicitSetterGetterRefExpr>(Exp);
+ Selector S = KE->getGetterMethod()->getSelector();
+ llvm::Value *Receiver;
+ if (KE->getInterfaceDecl()) {
+ const ObjCInterfaceDecl *OID = KE->getInterfaceDecl();
+ Receiver = CGM.getObjCRuntime().GetClass(Builder, OID);
+ } else if (isa<ObjCSuperExpr>(KE->getBase()))
+ return EmitObjCSuperPropertyGet(KE, S, Return);
+ else
+ Receiver = EmitScalarExpr(KE->getBase());
+ return CGM.getObjCRuntime().
+ GenerateMessageSend(*this, Return, Exp->getType(), S,
+ Receiver,
+ CallArgList(), KE->getInterfaceDecl());
+ }
+}
+
+void CodeGenFunction::EmitObjCSuperPropertySet(const Expr *Exp,
+ const Selector &S,
+ RValue Src) {
+ CallArgList Args;
+ llvm::Value *Receiver = LoadObjCSelf();
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ bool isClassMessage = OMD->isClassMethod();
+ bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ Args.push_back(std::make_pair(Src, Exp->getType()));
+ CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
+ ReturnValueSlot(),
+ getContext().VoidTy,
+ S,
+ OMD->getClassInterface(),
+ isCategoryImpl,
+ Receiver,
+ isClassMessage,
+ Args);
+ return;
+}
+
+void CodeGenFunction::EmitObjCPropertySet(const Expr *Exp,
+ RValue Src) {
+ // FIXME: Split it into two separate routines.
+ if (const ObjCPropertyRefExpr *E = dyn_cast<ObjCPropertyRefExpr>(Exp)) {
+ Selector S = E->getProperty()->getSetterName();
+ if (isa<ObjCSuperExpr>(E->getBase())) {
+ EmitObjCSuperPropertySet(E, S, Src);
+ return;
+ }
+ CallArgList Args;
+ Args.push_back(std::make_pair(Src, E->getType()));
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().VoidTy, S,
+ EmitScalarExpr(E->getBase()),
+ Args);
+ } else if (const ObjCImplicitSetterGetterRefExpr *E =
+ dyn_cast<ObjCImplicitSetterGetterRefExpr>(Exp)) {
+ const ObjCMethodDecl *SetterMD = E->getSetterMethod();
+ Selector S = SetterMD->getSelector();
+ CallArgList Args;
+ llvm::Value *Receiver;
+ if (E->getInterfaceDecl()) {
+ const ObjCInterfaceDecl *OID = E->getInterfaceDecl();
+ Receiver = CGM.getObjCRuntime().GetClass(Builder, OID);
+ } else if (isa<ObjCSuperExpr>(E->getBase())) {
+ EmitObjCSuperPropertySet(E, S, Src);
+ return;
+ } else
+ Receiver = EmitScalarExpr(E->getBase());
+ ObjCMethodDecl::param_iterator P = SetterMD->param_begin();
+ Args.push_back(std::make_pair(Src, (*P)->getType()));
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().VoidTy, S,
+ Receiver,
+ Args, E->getInterfaceDecl());
+ } else
+ assert (0 && "bad expression node in EmitObjCPropertySet");
+}
+
+void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
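+ // Overview (descriptive): this lowers 'for (element in collection) body' into
+ // repeated [collection countByEnumeratingWithState:objects:count:] messages
+ // using a 16-item buffer. Each pass through the body checks
+ // state.mutationsPtr against the value captured before the loop and calls the
+ // runtime's enumeration mutation function if the collection changed; when the
+ // buffer is exhausted more items are fetched, and the loop exits when the
+ // returned count is zero.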
+ llvm::Constant *EnumerationMutationFn =
+ CGM.getObjCRuntime().EnumerationMutationFunction();
+ llvm::Value *DeclAddress;
+ QualType ElementTy;
+
+ if (!EnumerationMutationFn) {
+ CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
+ return;
+ }
+
+ if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) {
+ EmitStmt(SD);
+ assert(HaveInsertPoint() && "DeclStmt destroyed insert point!");
+ const Decl* D = SD->getSingleDecl();
+ ElementTy = cast<ValueDecl>(D)->getType();
+ DeclAddress = LocalDeclMap[D];
+ } else {
+ ElementTy = cast<Expr>(S.getElement())->getType();
+ DeclAddress = 0;
+ }
+
+ // Fast enumeration state.
+ QualType StateTy = getContext().getObjCFastEnumerationStateType();
+ llvm::Value *StatePtr = CreateMemTemp(StateTy, "state.ptr");
+ EmitNullInitialization(StatePtr, StateTy);
+
+ // Number of elements in the items array.
+ static const unsigned NumItems = 16;
+
+ // Get selector
+ IdentifierInfo *II[] = {
+ &CGM.getContext().Idents.get("countByEnumeratingWithState"),
+ &CGM.getContext().Idents.get("objects"),
+ &CGM.getContext().Idents.get("count")
+ };
+ Selector FastEnumSel =
+ CGM.getContext().Selectors.getSelector(llvm::array_lengthof(II), &II[0]);
+
+ QualType ItemsTy =
+ getContext().getConstantArrayType(getContext().getObjCIdType(),
+ llvm::APInt(32, NumItems),
+ ArrayType::Normal, 0);
+ llvm::Value *ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");
+
+ llvm::Value *Collection = EmitScalarExpr(S.getCollection());
+
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(StatePtr),
+ getContext().getPointerType(StateTy)));
+
+ Args.push_back(std::make_pair(RValue::get(ItemsPtr),
+ getContext().getPointerType(ItemsTy)));
+
+ const llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
+ llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems);
+ Args.push_back(std::make_pair(RValue::get(Count),
+ getContext().UnsignedLongTy));
+
+ RValue CountRV =
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().UnsignedLongTy,
+ FastEnumSel,
+ Collection, Args);
+
+ llvm::Value *LimitPtr = CreateMemTemp(getContext().UnsignedLongTy,
+ "limit.ptr");
+ Builder.CreateStore(CountRV.getScalarVal(), LimitPtr);
+
+ llvm::BasicBlock *NoElements = createBasicBlock("noelements");
+ llvm::BasicBlock *SetStartMutations = createBasicBlock("setstartmutations");
+
+ llvm::Value *Limit = Builder.CreateLoad(LimitPtr);
+ llvm::Value *Zero = llvm::Constant::getNullValue(UnsignedLongLTy);
+
+ llvm::Value *IsZero = Builder.CreateICmpEQ(Limit, Zero, "iszero");
+ Builder.CreateCondBr(IsZero, NoElements, SetStartMutations);
+
+ EmitBlock(SetStartMutations);
+
+ llvm::Value *StartMutationsPtr = CreateMemTemp(getContext().UnsignedLongTy);
+
+ llvm::Value *StateMutationsPtrPtr =
+ Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr");
+ llvm::Value *StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr,
+ "mutationsptr");
+
+ llvm::Value *StateMutations = Builder.CreateLoad(StateMutationsPtr,
+ "mutations");
+
+ Builder.CreateStore(StateMutations, StartMutationsPtr);
+
+ llvm::BasicBlock *LoopStart = createBasicBlock("loopstart");
+ EmitBlock(LoopStart);
+
+ llvm::Value *CounterPtr = CreateMemTemp(getContext().UnsignedLongTy,
+ "counter.ptr");
+ Builder.CreateStore(Zero, CounterPtr);
+
+ llvm::BasicBlock *LoopBody = createBasicBlock("loopbody");
+ EmitBlock(LoopBody);
+
+ StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
+ StateMutations = Builder.CreateLoad(StateMutationsPtr, "statemutations");
+
+ llvm::Value *StartMutations = Builder.CreateLoad(StartMutationsPtr,
+ "mutations");
+ llvm::Value *MutationsEqual = Builder.CreateICmpEQ(StateMutations,
+ StartMutations,
+ "tobool");
+
+
+ llvm::BasicBlock *WasMutated = createBasicBlock("wasmutated");
+ llvm::BasicBlock *WasNotMutated = createBasicBlock("wasnotmutated");
+
+ Builder.CreateCondBr(MutationsEqual, WasNotMutated, WasMutated);
+
+ EmitBlock(WasMutated);
+ llvm::Value *V =
+ Builder.CreateBitCast(Collection,
+ ConvertType(getContext().getObjCIdType()),
+ "tmp");
+ CallArgList Args2;
+ Args2.push_back(std::make_pair(RValue::get(V),
+ getContext().getObjCIdType()));
+ // FIXME: We shouldn't need to get the function info here, the runtime already
+ // should have computed it to build the function.
+ EmitCall(CGM.getTypes().getFunctionInfo(getContext().VoidTy, Args2,
+ FunctionType::ExtInfo()),
+ EnumerationMutationFn, ReturnValueSlot(), Args2);
+
+ EmitBlock(WasNotMutated);
+
+ llvm::Value *StateItemsPtr =
+ Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr");
+
+ llvm::Value *Counter = Builder.CreateLoad(CounterPtr, "counter");
+
+ llvm::Value *EnumStateItems = Builder.CreateLoad(StateItemsPtr,
+ "stateitems");
+
+ llvm::Value *CurrentItemPtr =
+ Builder.CreateGEP(EnumStateItems, Counter, "currentitem.ptr");
+
+ llvm::Value *CurrentItem = Builder.CreateLoad(CurrentItemPtr, "currentitem");
+
+ // Cast the item to the right type.
+ CurrentItem = Builder.CreateBitCast(CurrentItem,
+ ConvertType(ElementTy), "tmp");
+
+ if (!DeclAddress) {
+ LValue LV = EmitLValue(cast<Expr>(S.getElement()));
+
+ // Store the current item into the element lvalue.
+ Builder.CreateStore(CurrentItem, LV.getAddress());
+ } else
+ Builder.CreateStore(CurrentItem, DeclAddress);
+
+ // Increment the counter.
+ Counter = Builder.CreateAdd(Counter,
+ llvm::ConstantInt::get(UnsignedLongLTy, 1));
+ Builder.CreateStore(Counter, CounterPtr);
+
+ JumpDest LoopEnd = getJumpDestInCurrentScope("loopend");
+ JumpDest AfterBody = getJumpDestInCurrentScope("afterbody");
+
+ BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
+
+ EmitStmt(S.getBody());
+
+ BreakContinueStack.pop_back();
+
+ EmitBlock(AfterBody.getBlock());
+
+ llvm::BasicBlock *FetchMore = createBasicBlock("fetchmore");
+
+ Counter = Builder.CreateLoad(CounterPtr);
+ Limit = Builder.CreateLoad(LimitPtr);
+ llvm::Value *IsLess = Builder.CreateICmpULT(Counter, Limit, "isless");
+ Builder.CreateCondBr(IsLess, LoopBody, FetchMore);
+
+ // Fetch more elements.
+ EmitBlock(FetchMore);
+
+ CountRV =
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().UnsignedLongTy,
+ FastEnumSel,
+ Collection, Args);
+ Builder.CreateStore(CountRV.getScalarVal(), LimitPtr);
+ Limit = Builder.CreateLoad(LimitPtr);
+
+ IsZero = Builder.CreateICmpEQ(Limit, Zero, "iszero");
+ Builder.CreateCondBr(IsZero, NoElements, LoopStart);
+
+ // No more elements.
+ EmitBlock(NoElements);
+
+ if (!DeclAddress) {
+ // If the element was not a declaration, set it to be null.
+
+ LValue LV = EmitLValue(cast<Expr>(S.getElement()));
+
+ // Set the value to null.
+ Builder.CreateStore(llvm::Constant::getNullValue(ConvertType(ElementTy)),
+ LV.getAddress());
+ }
+
+ EmitBlock(LoopEnd.getBlock());
+}
+
+void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
+ CGM.getObjCRuntime().EmitTryStmt(*this, S);
+}
+
+void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
+ CGM.getObjCRuntime().EmitThrowStmt(*this, S);
+}
+
+void CodeGenFunction::EmitObjCAtSynchronizedStmt(
+ const ObjCAtSynchronizedStmt &S) {
+ CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S);
+}
+
+CGObjCRuntime::~CGObjCRuntime() {}
+
+
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
new file mode 100644
index 0000000..d7960be
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -0,0 +1,2218 @@
+//===------- CGObjCGNU.cpp - Emit LLVM Code from ASTs for a Module --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides Objective-C code generation targeting the GNU runtime. The
+// class in this file generates structures used by the GNU Objective-C runtime
+// library. These structures are defined in objc/objc.h and objc/objc-api.h in
+// the GNU runtime distribution.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "CGException.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtObjC.h"
+
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Target/TargetData.h"
+
+#include <map>
+
+
+using namespace clang;
+using namespace CodeGen;
+using llvm::dyn_cast;
+
+// The version of the runtime that this class targets. Must match the version
+// in the runtime.
+static const int RuntimeVersion = 8;
+static const int NonFragileRuntimeVersion = 9;
+static const int ProtocolVersion = 2;
+static const int NonFragileProtocolVersion = 3;
+
+namespace {
+class CGObjCGNU : public CodeGen::CGObjCRuntime {
+private:
+ CodeGen::CodeGenModule &CGM;
+ llvm::Module &TheModule;
+ const llvm::PointerType *SelectorTy;
+ const llvm::IntegerType *Int8Ty;
+ const llvm::PointerType *PtrToInt8Ty;
+ const llvm::FunctionType *IMPTy;
+ const llvm::PointerType *IdTy;
+ const llvm::PointerType *PtrToIdTy;
+ CanQualType ASTIdTy;
+ const llvm::IntegerType *IntTy;
+ const llvm::PointerType *PtrTy;
+ const llvm::IntegerType *LongTy;
+ const llvm::PointerType *PtrToIntTy;
+ llvm::GlobalAlias *ClassPtrAlias;
+ llvm::GlobalAlias *MetaClassPtrAlias;
+ std::vector<llvm::Constant*> Classes;
+ std::vector<llvm::Constant*> Categories;
+ std::vector<llvm::Constant*> ConstantStrings;
+ llvm::StringMap<llvm::Constant*> ObjCStrings;
+ llvm::Function *LoadFunction;
+ llvm::StringMap<llvm::Constant*> ExistingProtocols;
+ typedef std::pair<std::string, std::string> TypedSelector;
+ std::map<TypedSelector, llvm::GlobalAlias*> TypedSelectors;
+ llvm::StringMap<llvm::GlobalAlias*> UntypedSelectors;
+ // Selectors that we don't emit in GC mode
+ Selector RetainSel, ReleaseSel, AutoreleaseSel;
+ // Functions used for GC.
+ llvm::Constant *IvarAssignFn, *StrongCastAssignFn, *MemMoveFn, *WeakReadFn,
+ *WeakAssignFn, *GlobalAssignFn;
+ // Some zeros used for GEPs in lots of places.
+ llvm::Constant *Zeros[2];
+ llvm::Constant *NULLPtr;
+ llvm::LLVMContext &VMContext;
+ /// Metadata kind used to tie method lookups to message sends.
+ unsigned msgSendMDKind;
+private:
+ llvm::Constant *GenerateIvarList(
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarNames,
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarTypes,
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarOffsets);
+ llvm::Constant *GenerateMethodList(const std::string &ClassName,
+ const std::string &CategoryName,
+ const llvm::SmallVectorImpl<Selector> &MethodSels,
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
+ bool isClassMethodList);
+ llvm::Constant *GenerateEmptyProtocol(const std::string &ProtocolName);
+ llvm::Constant *GeneratePropertyList(const ObjCImplementationDecl *OID,
+ llvm::SmallVectorImpl<Selector> &InstanceMethodSels,
+ llvm::SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes);
+ llvm::Constant *GenerateProtocolList(
+ const llvm::SmallVectorImpl<std::string> &Protocols);
+ // To ensure that all protocols are seen by the runtime, we add a category on
+ // a class defined in the runtime, declaring no methods, but adopting the
+ // protocols.
+ void GenerateProtocolHolderCategory(void);
+ llvm::Constant *GenerateClassStructure(
+ llvm::Constant *MetaClass,
+ llvm::Constant *SuperClass,
+ unsigned info,
+ const char *Name,
+ llvm::Constant *Version,
+ llvm::Constant *InstanceSize,
+ llvm::Constant *IVars,
+ llvm::Constant *Methods,
+ llvm::Constant *Protocols,
+ llvm::Constant *IvarOffsets,
+ llvm::Constant *Properties,
+ bool isMeta=false);
+ llvm::Constant *GenerateProtocolMethodList(
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodNames,
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes);
+ llvm::Constant *MakeConstantString(const std::string &Str, const std::string
+ &Name="");
+ llvm::Constant *ExportUniqueString(const std::string &Str, const std::string
+ prefix);
+ llvm::Constant *MakeGlobal(const llvm::StructType *Ty,
+ std::vector<llvm::Constant*> &V, llvm::StringRef Name="",
+ llvm::GlobalValue::LinkageTypes linkage=llvm::GlobalValue::InternalLinkage);
+ llvm::Constant *MakeGlobal(const llvm::ArrayType *Ty,
+ std::vector<llvm::Constant*> &V, llvm::StringRef Name="",
+ llvm::GlobalValue::LinkageTypes linkage=llvm::GlobalValue::InternalLinkage);
+ llvm::GlobalVariable *ObjCIvarOffsetVariable(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar);
+ void EmitClassRef(const std::string &className);
+ llvm::Value* EnforceType(CGBuilderTy B, llvm::Value *V, const llvm::Type *Ty){
+ if (V->getType() == Ty) return V;
+ return B.CreateBitCast(V, Ty);
+ }
+public:
+ CGObjCGNU(CodeGen::CodeGenModule &cgm);
+ virtual llvm::Constant *GenerateConstantString(const StringLiteral *);
+ virtual CodeGen::RValue
+ GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method);
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *OID);
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval = false);
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
+ *Method);
+ virtual llvm::Constant *GetEHType(QualType T);
+
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD);
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
+ virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD);
+ virtual void GenerateProtocol(const ObjCProtocolDecl *PD);
+ virtual llvm::Function *ModuleInitFunction();
+ virtual llvm::Function *GetPropertyGetFunction();
+ virtual llvm::Function *GetPropertySetFunction();
+ virtual llvm::Function *GetCopyStructFunction();
+ virtual llvm::Constant *EnumerationMutationFunction();
+
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S);
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S);
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S);
+ virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj);
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst);
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ bool threadlocal=false);
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ llvm::Value *ivarOffset);
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ llvm::Value *Size);
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+ virtual llvm::Constant *GCBlockLayout(CodeGen::CodeGenFunction &CGF,
+ const llvm::SmallVectorImpl<const BlockDeclRefExpr *> &) {
+ return NULLPtr;
+ }
+};
+} // end anonymous namespace
+
+
+/// Emits a reference to a dummy variable which is emitted with each class.
+/// This ensures that a linker error will be generated when trying to link
+/// together modules where a referenced class is not defined.
+void CGObjCGNU::EmitClassRef(const std::string &className) {
+ std::string symbolRef = "__objc_class_ref_" + className;
+ // Don't emit two copies of the same symbol
+ if (TheModule.getGlobalVariable(symbolRef))
+ return;
+ std::string symbolName = "__objc_class_name_" + className;
+ llvm::GlobalVariable *ClassSymbol = TheModule.getGlobalVariable(symbolName);
+ if (!ClassSymbol) {
+ ClassSymbol = new llvm::GlobalVariable(TheModule, LongTy, false,
+ llvm::GlobalValue::ExternalLinkage, 0, symbolName);
+ }
+ new llvm::GlobalVariable(TheModule, ClassSymbol->getType(), true,
+ llvm::GlobalValue::WeakAnyLinkage, ClassSymbol, symbolRef);
+}
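+// For illustration, EmitClassRef("NSObject") produces module-level globals
+// roughly equivalent to (LongTy shown as i32; it may be i64):
+//   @__objc_class_name_NSObject = external global i32
+//   @__objc_class_ref_NSObject = weak constant i32* @__objc_class_name_NSObject
+// Linking fails unless some module defines __objc_class_name_NSObject, which
+// GenerateClass() emits for every implemented class.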
+
+static std::string SymbolNameForMethod(const std::string &ClassName, const
+ std::string &CategoryName, const std::string &MethodName, bool isClassMethod)
+{
+ std::string MethodNameColonStripped = MethodName;
+ std::replace(MethodNameColonStripped.begin(), MethodNameColonStripped.end(),
+ ':', '_');
+ return std::string(isClassMethod ? "_c_" : "_i_") + ClassName + "_" +
+ CategoryName + "_" + MethodNameColonStripped;
+}
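+// For example, SymbolNameForMethod("Blah", "Extras", "foo:bar:", false)
+// returns "_i_Blah_Extras_foo_bar_"; passing true uses the "_c_" prefix for
+// class methods.
+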
+static std::string MangleSelectorTypes(const std::string &TypeString) {
+ std::string Mangled = TypeString;
+ // Simple mangling to avoid breaking when we mix JIT / static code.
+ // Not part of the ABI, subject to change without notice.
+ std::replace(Mangled.begin(), Mangled.end(), '@', '_');
+ std::replace(Mangled.begin(), Mangled.end(), ':', 'J');
+ std::replace(Mangled.begin(), Mangled.end(), '*', 'e');
+ std::replace(Mangled.begin(), Mangled.end(), '#', 'E');
+ std::replace(Mangled.begin(), Mangled.end(), '(', 'g');
+ std::replace(Mangled.begin(), Mangled.end(), ')', 'G');
+ std::replace(Mangled.begin(), Mangled.end(), '[', 'h');
+ std::replace(Mangled.begin(), Mangled.end(), ']', 'H');
+ return Mangled;
+}
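+// For example, MangleSelectorTypes("v12@0:4@8") yields "v12_0J4_8" under the
+// replacements above.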
+
+CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
+ : CGM(cgm), TheModule(CGM.getModule()), ClassPtrAlias(0),
+ MetaClassPtrAlias(0), VMContext(cgm.getLLVMContext()) {
+
+ msgSendMDKind = VMContext.getMDKindID("GNUObjCMessageSend");
+
+ IntTy = cast<llvm::IntegerType>(
+ CGM.getTypes().ConvertType(CGM.getContext().IntTy));
+ LongTy = cast<llvm::IntegerType>(
+ CGM.getTypes().ConvertType(CGM.getContext().LongTy));
+
+ Int8Ty = llvm::Type::getInt8Ty(VMContext);
+ // C string type. Used in lots of places.
+ PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty);
+
+ Zeros[0] = llvm::ConstantInt::get(LongTy, 0);
+ Zeros[1] = Zeros[0];
+ NULLPtr = llvm::ConstantPointerNull::get(PtrToInt8Ty);
+ // Get the selector Type.
+ QualType selTy = CGM.getContext().getObjCSelType();
+ if (QualType() == selTy) {
+ SelectorTy = PtrToInt8Ty;
+ } else {
+ SelectorTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(selTy));
+ }
+
+ PtrToIntTy = llvm::PointerType::getUnqual(IntTy);
+ PtrTy = PtrToInt8Ty;
+
+ // Object type
+ ASTIdTy = CGM.getContext().getCanonicalType(CGM.getContext().getObjCIdType());
+ if (QualType() == ASTIdTy) {
+ IdTy = PtrToInt8Ty;
+ } else {
+ IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
+ }
+ PtrToIdTy = llvm::PointerType::getUnqual(IdTy);
+
+ // IMP type
+ std::vector<const llvm::Type*> IMPArgs;
+ IMPArgs.push_back(IdTy);
+ IMPArgs.push_back(SelectorTy);
+ IMPTy = llvm::FunctionType::get(IdTy, IMPArgs, true);
+
+ if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
+ // Get selectors needed in GC mode
+ RetainSel = GetNullarySelector("retain", CGM.getContext());
+ ReleaseSel = GetNullarySelector("release", CGM.getContext());
+ AutoreleaseSel = GetNullarySelector("autorelease", CGM.getContext());
+
+ // Get functions needed in GC mode
+
+ // id objc_assign_ivar(id, id, ptrdiff_t);
+ std::vector<const llvm::Type*> Args(1, IdTy);
+ Args.push_back(PtrToIdTy);
+ // FIXME: ptrdiff_t
+ Args.push_back(LongTy);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(IdTy, Args, false);
+ IvarAssignFn = CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
+ // id objc_assign_strongCast (id, id*)
+ Args.pop_back();
+ FTy = llvm::FunctionType::get(IdTy, Args, false);
+ StrongCastAssignFn =
+ CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast");
+ // id objc_assign_global(id, id*);
+ FTy = llvm::FunctionType::get(IdTy, Args, false);
+ GlobalAssignFn = CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
+ // id objc_assign_weak(id, id*);
+ FTy = llvm::FunctionType::get(IdTy, Args, false);
+ WeakAssignFn = CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");
+ // id objc_read_weak(id*);
+ Args.clear();
+ Args.push_back(PtrToIdTy);
+ FTy = llvm::FunctionType::get(IdTy, Args, false);
+ WeakReadFn = CGM.CreateRuntimeFunction(FTy, "objc_read_weak");
+ // void *objc_memmove_collectable(void*, void *, size_t);
+ Args.clear();
+ Args.push_back(PtrToInt8Ty);
+ Args.push_back(PtrToInt8Ty);
+ // FIXME: size_t
+ Args.push_back(LongTy);
+ FTy = llvm::FunctionType::get(IdTy, Args, false);
+ MemMoveFn = CGM.CreateRuntimeFunction(FTy, "objc_memmove_collectable");
+ }
+}
+
+// This has to perform the lookup every time, since posing and related
+// techniques can modify the name -> class mapping.
+llvm::Value *CGObjCGNU::GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *OID) {
+ llvm::Value *ClassName = CGM.GetAddrOfConstantCString(OID->getNameAsString());
+ // With the incompatible ABI, this will need to be replaced with a direct
+ // reference to the class symbol. For the compatible nonfragile ABI we are
+ // still performing this lookup at run time but emitting the symbol for the
+ // class externally so that we can make the switch later.
+ EmitClassRef(OID->getNameAsString());
+ ClassName = Builder.CreateStructGEP(ClassName, 0);
+
+ std::vector<const llvm::Type*> Params(1, PtrToInt8Ty);
+ llvm::Constant *ClassLookupFn =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy,
+ Params,
+ true),
+ "objc_lookup_class");
+ return Builder.CreateCall(ClassLookupFn, ClassName);
+}
+
+llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval) {
+ llvm::GlobalAlias *&US = UntypedSelectors[Sel.getAsString()];
+ if (US == 0)
+ US = new llvm::GlobalAlias(llvm::PointerType::getUnqual(SelectorTy),
+ llvm::GlobalValue::PrivateLinkage,
+ ".objc_untyped_selector_alias"+Sel.getAsString(),
+ NULL, &TheModule);
+ if (lval)
+ return US;
+ return Builder.CreateLoad(US);
+}
+
+llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
+ *Method) {
+
+ std::string SelName = Method->getSelector().getAsString();
+ std::string SelTypes;
+ CGM.getContext().getObjCEncodingForMethodDecl(Method, SelTypes);
+ // Typed selectors
+ TypedSelector Selector = TypedSelector(SelName,
+ SelTypes);
+
+ // If it's already cached, return it.
+ if (TypedSelectors[Selector]) {
+ return Builder.CreateLoad(TypedSelectors[Selector]);
+ }
+
+ // If it isn't, cache it.
+ llvm::GlobalAlias *Sel = new llvm::GlobalAlias(
+ llvm::PointerType::getUnqual(SelectorTy),
+ llvm::GlobalValue::PrivateLinkage, ".objc_selector_alias" + SelName,
+ NULL, &TheModule);
+ TypedSelectors[Selector] = Sel;
+
+ return Builder.CreateLoad(Sel);
+}
+
+llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
+ llvm_unreachable("asking for catch type for ObjC type in GNU runtime");
+ return 0;
+}
+
+llvm::Constant *CGObjCGNU::MakeConstantString(const std::string &Str,
+ const std::string &Name) {
+ llvm::Constant *ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
+ return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros, 2);
+}
+llvm::Constant *CGObjCGNU::ExportUniqueString(const std::string &Str,
+ const std::string prefix) {
+ std::string name = prefix + Str;
+ llvm::Constant *ConstStr = TheModule.getGlobalVariable(name);
+ if (!ConstStr) {
+ llvm::Constant *value = llvm::ConstantArray::get(VMContext, Str, true);
+ ConstStr = new llvm::GlobalVariable(TheModule, value->getType(), true,
+ llvm::GlobalValue::LinkOnceODRLinkage, value, prefix + Str);
+ }
+ return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros, 2);
+}
+
+llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::StructType *Ty,
+ std::vector<llvm::Constant*> &V, llvm::StringRef Name,
+ llvm::GlobalValue::LinkageTypes linkage) {
+ llvm::Constant *C = llvm::ConstantStruct::get(Ty, V);
+ return new llvm::GlobalVariable(TheModule, Ty, false,
+ llvm::GlobalValue::InternalLinkage, C, Name);
+}
+
+llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::ArrayType *Ty,
+ std::vector<llvm::Constant*> &V, llvm::StringRef Name,
+ llvm::GlobalValue::LinkageTypes linkage) {
+ llvm::Constant *C = llvm::ConstantArray::get(Ty, V);
+ return new llvm::GlobalVariable(TheModule, Ty, false,
+ llvm::GlobalValue::InternalLinkage, C, Name);
+}
+
+/// Generate an NSConstantString object.
+llvm::Constant *CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
+
+ std::string Str = SL->getString().str();
+
+ // Look for an existing one
+ llvm::StringMap<llvm::Constant*>::iterator old = ObjCStrings.find(Str);
+ if (old != ObjCStrings.end())
+ return old->getValue();
+
+ std::vector<llvm::Constant*> Ivars;
+ Ivars.push_back(NULLPtr);
+ Ivars.push_back(MakeConstantString(Str));
+ Ivars.push_back(llvm::ConstantInt::get(IntTy, Str.size()));
+ llvm::Constant *ObjCStr = MakeGlobal(
+ llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty, IntTy, NULL),
+ Ivars, ".objc_str");
+ ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty);
+ ObjCStrings[Str] = ObjCStr;
+ ConstantStrings.push_back(ObjCStr);
+ return ObjCStr;
+}
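+// The literal emitted above is an { isa, bytes, length } triple; for @"hello"
+// it is roughly { NULL, "hello", 5 }. The isa slot is left NULL here and is
+// expected to be fixed up by the runtime when the module is loaded.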
+
+/// Generates a message send where the super is the receiver. This is a
+/// message send to self with special delivery semantics indicating which
+/// class's method should be called.
+CodeGen::RValue
+CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
+ if (Sel == RetainSel || Sel == AutoreleaseSel) {
+ return RValue::get(Receiver);
+ }
+ if (Sel == ReleaseSel) {
+ return RValue::get(0);
+ }
+ }
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *cmd = GetSelector(Builder, Sel);
+
+
+ CallArgList ActualArgs;
+
+ ActualArgs.push_back(
+ std::make_pair(RValue::get(Builder.CreateBitCast(Receiver, IdTy)),
+ ASTIdTy));
+ ActualArgs.push_back(std::make_pair(RValue::get(cmd),
+ CGF.getContext().getObjCSelType()));
+ ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
+ FunctionType::ExtInfo());
+ const llvm::FunctionType *impType =
+ Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
+
+ llvm::Value *ReceiverClass = 0;
+ if (isCategoryImpl) {
+ llvm::Constant *classLookupFunction = 0;
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(PtrTy);
+ if (IsClassMessage) {
+ classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ IdTy, Params, true), "objc_get_meta_class");
+ } else {
+ classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ IdTy, Params, true), "objc_get_class");
+ }
+ ReceiverClass = Builder.CreateCall(classLookupFunction,
+ MakeConstantString(Class->getNameAsString()));
+ } else {
+ // Set up global aliases for the metaclass or class pointer if they do not
+    // already exist. These are forward references that will be set to
+ // pointers to the class and metaclass structure created for the runtime
+ // load function. To send a message to super, we look up the value of the
+ // super_class pointer from either the class or metaclass structure.
+ if (IsClassMessage) {
+ if (!MetaClassPtrAlias) {
+ MetaClassPtrAlias = new llvm::GlobalAlias(IdTy,
+ llvm::GlobalValue::InternalLinkage, ".objc_metaclass_ref" +
+ Class->getNameAsString(), NULL, &TheModule);
+ }
+ ReceiverClass = MetaClassPtrAlias;
+ } else {
+ if (!ClassPtrAlias) {
+ ClassPtrAlias = new llvm::GlobalAlias(IdTy,
+ llvm::GlobalValue::InternalLinkage, ".objc_class_ref" +
+ Class->getNameAsString(), NULL, &TheModule);
+ }
+ ReceiverClass = ClassPtrAlias;
+ }
+ }
+ // Cast the pointer to a simplified version of the class structure
+ ReceiverClass = Builder.CreateBitCast(ReceiverClass,
+ llvm::PointerType::getUnqual(
+ llvm::StructType::get(VMContext, IdTy, IdTy, NULL)));
+ // Get the superclass pointer
+ ReceiverClass = Builder.CreateStructGEP(ReceiverClass, 1);
+ // Load the superclass pointer
+ ReceiverClass = Builder.CreateLoad(ReceiverClass);
+ // Construct the structure used to look up the IMP
+ llvm::StructType *ObjCSuperTy = llvm::StructType::get(VMContext,
+ Receiver->getType(), IdTy, NULL);
+ llvm::Value *ObjCSuper = Builder.CreateAlloca(ObjCSuperTy);
+
+ Builder.CreateStore(Receiver, Builder.CreateStructGEP(ObjCSuper, 0));
+ Builder.CreateStore(ReceiverClass, Builder.CreateStructGEP(ObjCSuper, 1));
+
+ // Get the IMP
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(llvm::PointerType::getUnqual(ObjCSuperTy));
+ Params.push_back(SelectorTy);
+
+ llvm::Value *lookupArgs[] = {ObjCSuper, cmd};
+ llvm::Value *imp;
+
+ if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+ // The lookup function returns a slot, which can be safely cached.
+ llvm::Type *SlotTy = llvm::StructType::get(VMContext, PtrTy, PtrTy, PtrTy,
+ IntTy, llvm::PointerType::getUnqual(impType), NULL);
+
+ llvm::Constant *lookupFunction =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::PointerType::getUnqual(SlotTy), Params, true),
+ "objc_slot_lookup_super");
+
+ llvm::CallInst *slot = Builder.CreateCall(lookupFunction, lookupArgs,
+ lookupArgs+2);
+ slot->setOnlyReadsMemory();
+
+ imp = Builder.CreateLoad(Builder.CreateStructGEP(slot, 4));
+ } else {
+ llvm::Constant *lookupFunction =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::PointerType::getUnqual(impType), Params, true),
+ "objc_msg_lookup_super");
+ imp = Builder.CreateCall(lookupFunction, lookupArgs, lookupArgs+2);
+ }
+
+ llvm::Value *impMD[] = {
+ llvm::MDString::get(VMContext, Sel.getAsString()),
+ llvm::MDString::get(VMContext, Class->getSuperClass()->getNameAsString()),
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), IsClassMessage)
+ };
+ llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD, 3);
+
+ llvm::Instruction *call;
+ RValue msgRet = CGF.EmitCall(FnInfo, imp, Return, ActualArgs,
+ 0, &call);
+ call->setMetadata(msgSendMDKind, node);
+ return msgRet;
+}
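+// In outline, the send-to-super above lowers to roughly the following
+// (sketch; the slot field name is illustrative, the code loads field 4):
+//   struct objc_super sup = { receiver, receiver_class->super_class };
+//   imp = objc_msg_lookup_super(&sup, sel);          // fragile ABI
+//   imp = objc_slot_lookup_super(&sup, sel)->method; // non-fragile ABI
+//   result = imp(receiver, sel, args...);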
+
+/// Generate code for a message send expression.
+CodeGen::RValue
+CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method) {
+ // Strip out message sends to retain / release in GC mode
+ if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
+ if (Sel == RetainSel || Sel == AutoreleaseSel) {
+ return RValue::get(Receiver);
+ }
+ if (Sel == ReleaseSel) {
+ return RValue::get(0);
+ }
+ }
+
+ CGBuilderTy &Builder = CGF.Builder;
+
+ // If the return type is something that goes in an integer register, the
+ // runtime will handle 0 returns. For other cases, we fill in the 0 value
+ // ourselves.
+ //
+ // The language spec says the result of this kind of message send is
+ // undefined, but lots of people seem to have forgotten to read that
+ // paragraph and insist on sending messages to nil that have structure
+ // returns. With GCC, this generates a random return value (whatever happens
+ // to be on the stack / in those registers at the time) on most platforms,
+ // and generates a SegV on SPARC. With LLVM it corrupts the stack.
+ bool isPointerSizedReturn = false;
+ if (ResultType->isAnyPointerType() ||
+ ResultType->isIntegralOrEnumerationType() || ResultType->isVoidType())
+ isPointerSizedReturn = true;
+
+ llvm::BasicBlock *startBB = 0;
+ llvm::BasicBlock *messageBB = 0;
+ llvm::BasicBlock *continueBB = 0;
+
+ if (!isPointerSizedReturn) {
+ startBB = Builder.GetInsertBlock();
+ messageBB = CGF.createBasicBlock("msgSend");
+ continueBB = CGF.createBasicBlock("continue");
+
+ llvm::Value *isNil = Builder.CreateICmpEQ(Receiver,
+ llvm::Constant::getNullValue(Receiver->getType()));
+ Builder.CreateCondBr(isNil, continueBB, messageBB);
+ CGF.EmitBlock(messageBB);
+ }
+
+ IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
+ llvm::Value *cmd;
+ if (Method)
+ cmd = GetSelector(Builder, Method);
+ else
+ cmd = GetSelector(Builder, Sel);
+ CallArgList ActualArgs;
+
+ Receiver = Builder.CreateBitCast(Receiver, IdTy);
+ ActualArgs.push_back(
+ std::make_pair(RValue::get(Receiver), ASTIdTy));
+ ActualArgs.push_back(std::make_pair(RValue::get(cmd),
+ CGF.getContext().getObjCSelType()));
+ ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
+ FunctionType::ExtInfo());
+ const llvm::FunctionType *impType =
+ Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
+
+ llvm::Value *impMD[] = {
+ llvm::MDString::get(VMContext, Sel.getAsString()),
+ llvm::MDString::get(VMContext, Class ? Class->getNameAsString() :""),
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), Class!=0)
+ };
+ llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD, 3);
+
+
+ llvm::Value *imp;
+ // For sender-aware dispatch, we pass the sender as the third argument to a
+ // lookup function. When sending messages from C code, the sender is nil.
+ // objc_msg_lookup_sender(id *receiver, SEL selector, id sender);
+ if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+
+ std::vector<const llvm::Type*> Params;
+ llvm::Value *ReceiverPtr = CGF.CreateTempAlloca(Receiver->getType());
+ Builder.CreateStore(Receiver, ReceiverPtr);
+ Params.push_back(ReceiverPtr->getType());
+ Params.push_back(SelectorTy);
+ llvm::Value *self;
+
+ if (isa<ObjCMethodDecl>(CGF.CurCodeDecl)) {
+ self = CGF.LoadObjCSelf();
+ } else {
+ self = llvm::ConstantPointerNull::get(IdTy);
+ }
+
+ Params.push_back(self->getType());
+
+ // The lookup function returns a slot, which can be safely cached.
+ llvm::Type *SlotTy = llvm::StructType::get(VMContext, PtrTy, PtrTy, PtrTy,
+ IntTy, llvm::PointerType::getUnqual(impType), NULL);
+ llvm::Constant *lookupFunction =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::PointerType::getUnqual(SlotTy), Params, true),
+ "objc_msg_lookup_sender");
+
+ // The lookup function is guaranteed not to capture the receiver pointer.
+ if (llvm::Function *LookupFn = dyn_cast<llvm::Function>(lookupFunction)) {
+ LookupFn->setDoesNotCapture(1);
+ }
+
+ llvm::CallInst *slot =
+ Builder.CreateCall3(lookupFunction, ReceiverPtr, cmd, self);
+ slot->setOnlyReadsMemory();
+ slot->setMetadata(msgSendMDKind, node);
+
+ imp = Builder.CreateLoad(Builder.CreateStructGEP(slot, 4));
+
+ // The lookup function may have changed the receiver, so make sure we use
+ // the new one.
+ ActualArgs[0] = std::make_pair(RValue::get(
+ Builder.CreateLoad(ReceiverPtr, true)), ASTIdTy);
+ } else {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(Receiver->getType());
+ Params.push_back(SelectorTy);
+ llvm::Constant *lookupFunction =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::PointerType::getUnqual(impType), Params, true),
+ "objc_msg_lookup");
+
+ imp = Builder.CreateCall2(lookupFunction, Receiver, cmd);
+ cast<llvm::CallInst>(imp)->setMetadata(msgSendMDKind, node);
+ }
+ llvm::Instruction *call;
+ RValue msgRet = CGF.EmitCall(FnInfo, imp, Return, ActualArgs,
+ 0, &call);
+ call->setMetadata(msgSendMDKind, node);
+
+
+ if (!isPointerSizedReturn) {
+ messageBB = CGF.Builder.GetInsertBlock();
+ CGF.Builder.CreateBr(continueBB);
+ CGF.EmitBlock(continueBB);
+ if (msgRet.isScalar()) {
+ llvm::Value *v = msgRet.getScalarVal();
+ llvm::PHINode *phi = Builder.CreatePHI(v->getType());
+ phi->addIncoming(v, messageBB);
+ phi->addIncoming(llvm::Constant::getNullValue(v->getType()), startBB);
+ msgRet = RValue::get(phi);
+ } else if (msgRet.isAggregate()) {
+ llvm::Value *v = msgRet.getAggregateAddr();
+ llvm::PHINode *phi = Builder.CreatePHI(v->getType());
+ const llvm::PointerType *RetTy = cast<llvm::PointerType>(v->getType());
+ llvm::AllocaInst *NullVal =
+ CGF.CreateTempAlloca(RetTy->getElementType(), "null");
+ CGF.InitTempAlloca(NullVal,
+ llvm::Constant::getNullValue(RetTy->getElementType()));
+ phi->addIncoming(v, messageBB);
+ phi->addIncoming(NullVal, startBB);
+ msgRet = RValue::getAggregate(phi);
+ } else /* isComplex() */ {
+ std::pair<llvm::Value*,llvm::Value*> v = msgRet.getComplexVal();
+ llvm::PHINode *phi = Builder.CreatePHI(v.first->getType());
+ phi->addIncoming(v.first, messageBB);
+ phi->addIncoming(llvm::Constant::getNullValue(v.first->getType()),
+ startBB);
+ llvm::PHINode *phi2 = Builder.CreatePHI(v.second->getType());
+ phi2->addIncoming(v.second, messageBB);
+ phi2->addIncoming(llvm::Constant::getNullValue(v.second->getType()),
+ startBB);
+ msgRet = RValue::getComplex(phi, phi2);
+ }
+ }
+ return msgRet;
+}
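+// In outline, a send with a non-pointer-sized (e.g. struct) return lowers to
+// roughly (sketch; the slot field name is illustrative):
+//   if (receiver == nil) { result = zero-initialised temporary; }
+//   else {
+//     imp = objc_msg_lookup(receiver, sel);                       // fragile
+//     imp = objc_msg_lookup_sender(&receiver, sel, self)->method; // non-fragile
+//     result = imp(receiver, sel, args...);
+//   }
+// Pointer-sized returns skip the nil check since the runtime returns 0 itself.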
+
+/// Generates a MethodList. Used in the construction of objc_class and
+/// objc_category structures.
+llvm::Constant *CGObjCGNU::GenerateMethodList(const std::string &ClassName,
+ const std::string &CategoryName,
+ const llvm::SmallVectorImpl<Selector> &MethodSels,
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
+ bool isClassMethodList) {
+ if (MethodSels.empty())
+ return NULLPtr;
+ // Get the method structure type.
+ llvm::StructType *ObjCMethodTy = llvm::StructType::get(VMContext,
+      PtrToInt8Ty, // Really a selector, but the runtime creates it for us.
+ PtrToInt8Ty, // Method types
+ llvm::PointerType::getUnqual(IMPTy), //Method pointer
+ NULL);
+ std::vector<llvm::Constant*> Methods;
+ std::vector<llvm::Constant*> Elements;
+ for (unsigned int i = 0, e = MethodTypes.size(); i < e; ++i) {
+ Elements.clear();
+ if (llvm::Constant *Method =
+ TheModule.getFunction(SymbolNameForMethod(ClassName, CategoryName,
+ MethodSels[i].getAsString(),
+ isClassMethodList))) {
+ llvm::Constant *C = MakeConstantString(MethodSels[i].getAsString());
+ Elements.push_back(C);
+ Elements.push_back(MethodTypes[i]);
+ Method = llvm::ConstantExpr::getBitCast(Method,
+ llvm::PointerType::getUnqual(IMPTy));
+ Elements.push_back(Method);
+ Methods.push_back(llvm::ConstantStruct::get(ObjCMethodTy, Elements));
+ }
+ }
+
+ // Array of method structures
+ llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodTy,
+ Methods.size());
+ llvm::Constant *MethodArray = llvm::ConstantArray::get(ObjCMethodArrayTy,
+ Methods);
+
+ // Structure containing list pointer, array and array count
+ llvm::SmallVector<const llvm::Type*, 16> ObjCMethodListFields;
+ llvm::PATypeHolder OpaqueNextTy = llvm::OpaqueType::get(VMContext);
+ llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(OpaqueNextTy);
+ llvm::StructType *ObjCMethodListTy = llvm::StructType::get(VMContext,
+ NextPtrTy,
+ IntTy,
+ ObjCMethodArrayTy,
+ NULL);
+ // Refine next pointer type to concrete type
+ llvm::cast<llvm::OpaqueType>(
+ OpaqueNextTy.get())->refineAbstractTypeTo(ObjCMethodListTy);
+ ObjCMethodListTy = llvm::cast<llvm::StructType>(OpaqueNextTy.get());
+
+ Methods.clear();
+ Methods.push_back(llvm::ConstantPointerNull::get(
+ llvm::PointerType::getUnqual(ObjCMethodListTy)));
+ Methods.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ MethodTypes.size()));
+ Methods.push_back(MethodArray);
+
+ // Create an instance of the structure
+ return MakeGlobal(ObjCMethodListTy, Methods, ".objc_method_list");
+}
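+// The global emitted above corresponds approximately to this C layout (field
+// names illustrative; the method array is inlined after the count):
+//   struct objc_method_list {
+//     struct objc_method_list *next;   // always NULL when emitted here
+//     int count;
+//     struct { const char *sel_name; const char *types; IMP imp; } methods[];
+//   };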
+
+/// Generates an IvarList. Used in the construction of an objc_class.
+llvm::Constant *CGObjCGNU::GenerateIvarList(
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarNames,
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarTypes,
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarOffsets) {
+ if (IvarNames.size() == 0)
+ return NULLPtr;
+ // Get the method structure type.
+ llvm::StructType *ObjCIvarTy = llvm::StructType::get(VMContext,
+ PtrToInt8Ty,
+ PtrToInt8Ty,
+ IntTy,
+ NULL);
+ std::vector<llvm::Constant*> Ivars;
+ std::vector<llvm::Constant*> Elements;
+ for (unsigned int i = 0, e = IvarNames.size() ; i < e ; i++) {
+ Elements.clear();
+ Elements.push_back(IvarNames[i]);
+ Elements.push_back(IvarTypes[i]);
+ Elements.push_back(IvarOffsets[i]);
+ Ivars.push_back(llvm::ConstantStruct::get(ObjCIvarTy, Elements));
+ }
+
+ // Array of method structures
+ llvm::ArrayType *ObjCIvarArrayTy = llvm::ArrayType::get(ObjCIvarTy,
+ IvarNames.size());
+
+
+ Elements.clear();
+ Elements.push_back(llvm::ConstantInt::get(IntTy, (int)IvarNames.size()));
+ Elements.push_back(llvm::ConstantArray::get(ObjCIvarArrayTy, Ivars));
+ // Structure containing array and array count
+ llvm::StructType *ObjCIvarListTy = llvm::StructType::get(VMContext, IntTy,
+ ObjCIvarArrayTy,
+ NULL);
+
+ // Create an instance of the structure
+ return MakeGlobal(ObjCIvarListTy, Elements, ".objc_ivar_list");
+}
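+// Approximate shape of the global emitted above (field names illustrative):
+//   struct objc_ivar_list {
+//     int count;
+//     struct { const char *name; const char *type; int offset; } ivars[];
+//   };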
+
+/// Generate a class structure
+llvm::Constant *CGObjCGNU::GenerateClassStructure(
+ llvm::Constant *MetaClass,
+ llvm::Constant *SuperClass,
+ unsigned info,
+ const char *Name,
+ llvm::Constant *Version,
+ llvm::Constant *InstanceSize,
+ llvm::Constant *IVars,
+ llvm::Constant *Methods,
+ llvm::Constant *Protocols,
+ llvm::Constant *IvarOffsets,
+ llvm::Constant *Properties,
+ bool isMeta) {
+ // Set up the class structure
+ // Note: Several of these are char*s when they should be ids. This is
+ // because the runtime performs this translation on load.
+ //
+ // Fields marked New ABI are part of the GNUstep runtime. We emit them
+ // anyway; the classes will still work with the GNU runtime, they will just
+ // be ignored.
+ llvm::StructType *ClassTy = llvm::StructType::get(VMContext,
+ PtrToInt8Ty, // class_pointer
+ PtrToInt8Ty, // super_class
+ PtrToInt8Ty, // name
+ LongTy, // version
+ LongTy, // info
+ LongTy, // instance_size
+ IVars->getType(), // ivars
+ Methods->getType(), // methods
+      // These are all filled in by the runtime, so we just emit opaque pointers.
+ PtrTy, // dtable
+ PtrTy, // subclass_list
+ PtrTy, // sibling_class
+ PtrTy, // protocols
+ PtrTy, // gc_object_type
+ // New ABI:
+ LongTy, // abi_version
+ IvarOffsets->getType(), // ivar_offsets
+ Properties->getType(), // properties
+ NULL);
+ llvm::Constant *Zero = llvm::ConstantInt::get(LongTy, 0);
+ // Fill in the structure
+ std::vector<llvm::Constant*> Elements;
+ Elements.push_back(llvm::ConstantExpr::getBitCast(MetaClass, PtrToInt8Ty));
+ Elements.push_back(SuperClass);
+ Elements.push_back(MakeConstantString(Name, ".class_name"));
+ Elements.push_back(Zero);
+ Elements.push_back(llvm::ConstantInt::get(LongTy, info));
+ Elements.push_back(InstanceSize);
+ Elements.push_back(IVars);
+ Elements.push_back(Methods);
+ Elements.push_back(NULLPtr);
+ Elements.push_back(NULLPtr);
+ Elements.push_back(NULLPtr);
+ Elements.push_back(llvm::ConstantExpr::getBitCast(Protocols, PtrTy));
+ Elements.push_back(NULLPtr);
+ Elements.push_back(Zero);
+ Elements.push_back(IvarOffsets);
+ Elements.push_back(Properties);
+ // Create an instance of the structure
+ // This is now an externally visible symbol, so that we can speed up class
+ // messages in the next ABI.
+ return MakeGlobal(ClassTy, Elements, (isMeta ? "_OBJC_METACLASS_":
+ "_OBJC_CLASS_") + std::string(Name), llvm::GlobalValue::ExternalLinkage);
+}
+
+llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodNames,
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes) {
+ // Get the method structure type.
+ llvm::StructType *ObjCMethodDescTy = llvm::StructType::get(VMContext,
+ PtrToInt8Ty, // Really a selector, but the runtime does the casting for us.
+ PtrToInt8Ty,
+ NULL);
+ std::vector<llvm::Constant*> Methods;
+ std::vector<llvm::Constant*> Elements;
+ for (unsigned int i = 0, e = MethodTypes.size() ; i < e ; i++) {
+ Elements.clear();
+ Elements.push_back(MethodNames[i]);
+ Elements.push_back(MethodTypes[i]);
+ Methods.push_back(llvm::ConstantStruct::get(ObjCMethodDescTy, Elements));
+ }
+ llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodDescTy,
+ MethodNames.size());
+ llvm::Constant *Array = llvm::ConstantArray::get(ObjCMethodArrayTy,
+ Methods);
+ llvm::StructType *ObjCMethodDescListTy = llvm::StructType::get(VMContext,
+ IntTy, ObjCMethodArrayTy, NULL);
+ Methods.clear();
+ Methods.push_back(llvm::ConstantInt::get(IntTy, MethodNames.size()));
+ Methods.push_back(Array);
+ return MakeGlobal(ObjCMethodDescListTy, Methods, ".objc_method_list");
+}
+
+// Create the protocol list structure used in classes, categories and so on
+llvm::Constant *CGObjCGNU::GenerateProtocolList(
+ const llvm::SmallVectorImpl<std::string> &Protocols) {
+ llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
+ Protocols.size());
+ llvm::StructType *ProtocolListTy = llvm::StructType::get(VMContext,
+      PtrTy, // Should be a recursive pointer, but it's always NULL here.
+ LongTy,//FIXME: Should be size_t
+ ProtocolArrayTy,
+ NULL);
+ std::vector<llvm::Constant*> Elements;
+ for (const std::string *iter = Protocols.begin(), *endIter = Protocols.end();
+ iter != endIter ; iter++) {
+ llvm::Constant *protocol = 0;
+ llvm::StringMap<llvm::Constant*>::iterator value =
+ ExistingProtocols.find(*iter);
+ if (value == ExistingProtocols.end()) {
+ protocol = GenerateEmptyProtocol(*iter);
+ } else {
+ protocol = value->getValue();
+ }
+ llvm::Constant *Ptr = llvm::ConstantExpr::getBitCast(protocol,
+ PtrToInt8Ty);
+ Elements.push_back(Ptr);
+ }
+ llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
+ Elements);
+ Elements.clear();
+ Elements.push_back(NULLPtr);
+ Elements.push_back(llvm::ConstantInt::get(LongTy, Protocols.size()));
+ Elements.push_back(ProtocolArray);
+ return MakeGlobal(ProtocolListTy, Elements, ".objc_protocol_list");
+}
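+// Approximate shape of the global emitted above (field names illustrative):
+//   struct objc_protocol_list {
+//     struct objc_protocol_list *next;  // always NULL when emitted here
+//     long count;                       // should be size_t (see FIXME above)
+//     Protocol *list[];
+//   };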
+
+llvm::Value *CGObjCGNU::GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD) {
+ llvm::Value *protocol = ExistingProtocols[PD->getNameAsString()];
+ const llvm::Type *T =
+ CGM.getTypes().ConvertType(CGM.getContext().getObjCProtoType());
+ return Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T));
+}
+
+llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
+ const std::string &ProtocolName) {
+ llvm::SmallVector<std::string, 0> EmptyStringVector;
+ llvm::SmallVector<llvm::Constant*, 0> EmptyConstantVector;
+
+ llvm::Constant *ProtocolList = GenerateProtocolList(EmptyStringVector);
+ llvm::Constant *MethodList =
+ GenerateProtocolMethodList(EmptyConstantVector, EmptyConstantVector);
+ // Protocols are objects containing lists of the methods implemented and
+ // protocols adopted.
+ llvm::StructType *ProtocolTy = llvm::StructType::get(VMContext, IdTy,
+ PtrToInt8Ty,
+ ProtocolList->getType(),
+ MethodList->getType(),
+ MethodList->getType(),
+ MethodList->getType(),
+ MethodList->getType(),
+ NULL);
+ std::vector<llvm::Constant*> Elements;
+ // The isa pointer must be set to a magic number so the runtime knows it's
+ // the correct layout.
+ int Version = CGM.getContext().getLangOptions().ObjCNonFragileABI ?
+ NonFragileProtocolVersion : ProtocolVersion;
+ Elements.push_back(llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Version), IdTy));
+ Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
+ Elements.push_back(ProtocolList);
+ Elements.push_back(MethodList);
+ Elements.push_back(MethodList);
+ Elements.push_back(MethodList);
+ Elements.push_back(MethodList);
+ return MakeGlobal(ProtocolTy, Elements, ".objc_protocol");
+}
+
+void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
+ ASTContext &Context = CGM.getContext();
+ std::string ProtocolName = PD->getNameAsString();
+ llvm::SmallVector<std::string, 16> Protocols;
+ for (ObjCProtocolDecl::protocol_iterator PI = PD->protocol_begin(),
+ E = PD->protocol_end(); PI != E; ++PI)
+ Protocols.push_back((*PI)->getNameAsString());
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethodNames;
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ llvm::SmallVector<llvm::Constant*, 16> OptionalInstanceMethodNames;
+ llvm::SmallVector<llvm::Constant*, 16> OptionalInstanceMethodTypes;
+ for (ObjCProtocolDecl::instmeth_iterator iter = PD->instmeth_begin(),
+ E = PD->instmeth_end(); iter != E; iter++) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(*iter, TypeStr);
+    if ((*iter)->getImplementationControl() == ObjCMethodDecl::Optional) {
+      OptionalInstanceMethodNames.push_back(
+          MakeConstantString((*iter)->getSelector().getAsString()));
+      OptionalInstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+    } else {
+      InstanceMethodNames.push_back(
+          MakeConstantString((*iter)->getSelector().getAsString()));
+      InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+    }
+ }
+ // Collect information about class methods:
+ llvm::SmallVector<llvm::Constant*, 16> ClassMethodNames;
+ llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ llvm::SmallVector<llvm::Constant*, 16> OptionalClassMethodNames;
+ llvm::SmallVector<llvm::Constant*, 16> OptionalClassMethodTypes;
+ for (ObjCProtocolDecl::classmeth_iterator
+ iter = PD->classmeth_begin(), endIter = PD->classmeth_end();
+ iter != endIter ; iter++) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+    if ((*iter)->getImplementationControl() == ObjCMethodDecl::Optional) {
+      OptionalClassMethodNames.push_back(
+          MakeConstantString((*iter)->getSelector().getAsString()));
+      OptionalClassMethodTypes.push_back(MakeConstantString(TypeStr));
+    } else {
+      ClassMethodNames.push_back(
+          MakeConstantString((*iter)->getSelector().getAsString()));
+      ClassMethodTypes.push_back(MakeConstantString(TypeStr));
+    }
+ }
+
+ llvm::Constant *ProtocolList = GenerateProtocolList(Protocols);
+ llvm::Constant *InstanceMethodList =
+ GenerateProtocolMethodList(InstanceMethodNames, InstanceMethodTypes);
+ llvm::Constant *ClassMethodList =
+ GenerateProtocolMethodList(ClassMethodNames, ClassMethodTypes);
+ llvm::Constant *OptionalInstanceMethodList =
+ GenerateProtocolMethodList(OptionalInstanceMethodNames,
+ OptionalInstanceMethodTypes);
+ llvm::Constant *OptionalClassMethodList =
+ GenerateProtocolMethodList(OptionalClassMethodNames,
+ OptionalClassMethodTypes);
+
+ // Property metadata: name, attributes, isSynthesized, setter name, setter
+ // types, getter name, getter types.
+ // The isSynthesized value is always set to 0 in a protocol. It exists to
+ // simplify the runtime library by allowing it to use the same data
+ // structures for protocol metadata everywhere.
+ llvm::StructType *PropertyMetadataTy = llvm::StructType::get(VMContext,
+ PtrToInt8Ty, Int8Ty, Int8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty,
+ PtrToInt8Ty, NULL);
+ std::vector<llvm::Constant*> Properties;
+ std::vector<llvm::Constant*> OptionalProperties;
+
+  // Add all of the property methods that need adding to the method list and
+  // to the property metadata list.
+ for (ObjCContainerDecl::prop_iterator
+ iter = PD->prop_begin(), endIter = PD->prop_end();
+ iter != endIter ; iter++) {
+ std::vector<llvm::Constant*> Fields;
+ ObjCPropertyDecl *property = (*iter);
+
+ Fields.push_back(MakeConstantString(property->getNameAsString()));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty,
+ property->getPropertyAttributes()));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty, 0));
+ if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(getter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ InstanceMethodTypes.push_back(TypeEncoding);
+ Fields.push_back(MakeConstantString(getter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(setter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ InstanceMethodTypes.push_back(TypeEncoding);
+ Fields.push_back(MakeConstantString(setter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ if (property->getPropertyImplementation() == ObjCPropertyDecl::Optional) {
+ OptionalProperties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+ } else {
+ Properties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+ }
+ }
+ llvm::Constant *PropertyArray = llvm::ConstantArray::get(
+ llvm::ArrayType::get(PropertyMetadataTy, Properties.size()), Properties);
+ llvm::Constant* PropertyListInitFields[] =
+ {llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
+
+ llvm::Constant *PropertyListInit =
+ llvm::ConstantStruct::get(VMContext, PropertyListInitFields, 3, false);
+ llvm::Constant *PropertyList = new llvm::GlobalVariable(TheModule,
+ PropertyListInit->getType(), false, llvm::GlobalValue::InternalLinkage,
+ PropertyListInit, ".objc_property_list");
+
+ llvm::Constant *OptionalPropertyArray =
+ llvm::ConstantArray::get(llvm::ArrayType::get(PropertyMetadataTy,
+ OptionalProperties.size()) , OptionalProperties);
+ llvm::Constant* OptionalPropertyListInitFields[] = {
+ llvm::ConstantInt::get(IntTy, OptionalProperties.size()), NULLPtr,
+ OptionalPropertyArray };
+
+ llvm::Constant *OptionalPropertyListInit =
+ llvm::ConstantStruct::get(VMContext, OptionalPropertyListInitFields, 3, false);
+ llvm::Constant *OptionalPropertyList = new llvm::GlobalVariable(TheModule,
+ OptionalPropertyListInit->getType(), false,
+ llvm::GlobalValue::InternalLinkage, OptionalPropertyListInit,
+ ".objc_property_list");
+
+ // Protocols are objects containing lists of the methods implemented and
+ // protocols adopted.
+ llvm::StructType *ProtocolTy = llvm::StructType::get(VMContext, IdTy,
+ PtrToInt8Ty,
+ ProtocolList->getType(),
+ InstanceMethodList->getType(),
+ ClassMethodList->getType(),
+ OptionalInstanceMethodList->getType(),
+ OptionalClassMethodList->getType(),
+ PropertyList->getType(),
+ OptionalPropertyList->getType(),
+ NULL);
+ std::vector<llvm::Constant*> Elements;
+ // The isa pointer must be set to a magic number so the runtime knows it's
+ // the correct layout.
+ int Version = CGM.getContext().getLangOptions().ObjCNonFragileABI ?
+ NonFragileProtocolVersion : ProtocolVersion;
+ Elements.push_back(llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Version), IdTy));
+ Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
+ Elements.push_back(ProtocolList);
+ Elements.push_back(InstanceMethodList);
+ Elements.push_back(ClassMethodList);
+ Elements.push_back(OptionalInstanceMethodList);
+ Elements.push_back(OptionalClassMethodList);
+ Elements.push_back(PropertyList);
+ Elements.push_back(OptionalPropertyList);
+ ExistingProtocols[ProtocolName] =
+ llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolTy, Elements,
+ ".objc_protocol"), IdTy);
+}
+void CGObjCGNU::GenerateProtocolHolderCategory(void) {
+ // Collect information about instance methods
+ llvm::SmallVector<Selector, 1> MethodSels;
+ llvm::SmallVector<llvm::Constant*, 1> MethodTypes;
+
+ std::vector<llvm::Constant*> Elements;
+ const std::string ClassName = "__ObjC_Protocol_Holder_Ugly_Hack";
+ const std::string CategoryName = "AnotherHack";
+ Elements.push_back(MakeConstantString(CategoryName));
+ Elements.push_back(MakeConstantString(ClassName));
+ // Instance method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, MethodSels, MethodTypes, false), PtrTy));
+ // Class method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, MethodSels, MethodTypes, true), PtrTy));
+ // Protocol list
+ llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrTy,
+ ExistingProtocols.size());
+ llvm::StructType *ProtocolListTy = llvm::StructType::get(VMContext,
+      PtrTy, // Should be a recursive pointer, but it's always NULL here.
+ LongTy,//FIXME: Should be size_t
+ ProtocolArrayTy,
+ NULL);
+ std::vector<llvm::Constant*> ProtocolElements;
+ for (llvm::StringMapIterator<llvm::Constant*> iter =
+ ExistingProtocols.begin(), endIter = ExistingProtocols.end();
+ iter != endIter ; iter++) {
+ llvm::Constant *Ptr = llvm::ConstantExpr::getBitCast(iter->getValue(),
+ PtrTy);
+ ProtocolElements.push_back(Ptr);
+ }
+ llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
+ ProtocolElements);
+ ProtocolElements.clear();
+ ProtocolElements.push_back(NULLPtr);
+ ProtocolElements.push_back(llvm::ConstantInt::get(LongTy,
+ ExistingProtocols.size()));
+ ProtocolElements.push_back(ProtocolArray);
+ Elements.push_back(llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolListTy,
+ ProtocolElements, ".objc_protocol_list"), PtrTy));
+ Categories.push_back(llvm::ConstantExpr::getBitCast(
+ MakeGlobal(llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty,
+ PtrTy, PtrTy, PtrTy, NULL), Elements), PtrTy));
+}
+
+void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+ std::string ClassName = OCD->getClassInterface()->getNameAsString();
+ std::string CategoryName = OCD->getNameAsString();
+ // Collect information about instance methods
+ llvm::SmallVector<Selector, 16> InstanceMethodSels;
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ iter = OCD->instmeth_begin(), endIter = OCD->instmeth_end();
+ iter != endIter ; iter++) {
+ InstanceMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForMethodDecl(*iter,TypeStr);
+ InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+
+ // Collect information about class methods
+ llvm::SmallVector<Selector, 16> ClassMethodSels;
+ llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ iter = OCD->classmeth_begin(), endIter = OCD->classmeth_end();
+ iter != endIter ; iter++) {
+ ClassMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForMethodDecl(*iter,TypeStr);
+ ClassMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+
+ // Collect the names of referenced protocols
+ llvm::SmallVector<std::string, 16> Protocols;
+ const ObjCCategoryDecl *CatDecl = OCD->getCategoryDecl();
+ const ObjCList<ObjCProtocolDecl> &Protos = CatDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(),
+ E = Protos.end(); I != E; ++I)
+ Protocols.push_back((*I)->getNameAsString());
+
+ std::vector<llvm::Constant*> Elements;
+ Elements.push_back(MakeConstantString(CategoryName));
+ Elements.push_back(MakeConstantString(ClassName));
+ // Instance method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, InstanceMethodSels, InstanceMethodTypes,
+ false), PtrTy));
+ // Class method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, ClassMethodSels, ClassMethodTypes, true),
+ PtrTy));
+ // Protocol list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(
+ GenerateProtocolList(Protocols), PtrTy));
+ Categories.push_back(llvm::ConstantExpr::getBitCast(
+ MakeGlobal(llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty,
+ PtrTy, PtrTy, PtrTy, NULL), Elements), PtrTy));
+}
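+// The category global emitted above is roughly (field names illustrative):
+//   struct objc_category {
+//     const char *category_name;
+//     const char *class_name;
+//     struct objc_method_list *instance_methods;
+//     struct objc_method_list *class_methods;
+//     struct objc_protocol_list *protocols;
+//   };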
+
+llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OID,
+ llvm::SmallVectorImpl<Selector> &InstanceMethodSels,
+ llvm::SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes) {
+ ASTContext &Context = CGM.getContext();
+ //
+ // Property metadata: name, attributes, isSynthesized, setter name, setter
+ // types, getter name, getter types.
+ llvm::StructType *PropertyMetadataTy = llvm::StructType::get(VMContext,
+ PtrToInt8Ty, Int8Ty, Int8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty,
+ PtrToInt8Ty, NULL);
+ std::vector<llvm::Constant*> Properties;
+
+
+  // Add all of the property methods that need adding to the method list and
+  // to the property metadata list.
+ for (ObjCImplDecl::propimpl_iterator
+ iter = OID->propimpl_begin(), endIter = OID->propimpl_end();
+ iter != endIter ; iter++) {
+ std::vector<llvm::Constant*> Fields;
+ ObjCPropertyDecl *property = (*iter)->getPropertyDecl();
+ ObjCPropertyImplDecl *propertyImpl = *iter;
+ bool isSynthesized = (propertyImpl->getPropertyImplementation() ==
+ ObjCPropertyImplDecl::Synthesize);
+
+ Fields.push_back(MakeConstantString(property->getNameAsString()));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty,
+ property->getPropertyAttributes()));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty, isSynthesized));
+ if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(getter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ if (isSynthesized) {
+ InstanceMethodTypes.push_back(TypeEncoding);
+ InstanceMethodSels.push_back(getter->getSelector());
+ }
+ Fields.push_back(MakeConstantString(getter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(setter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ if (isSynthesized) {
+ InstanceMethodTypes.push_back(TypeEncoding);
+ InstanceMethodSels.push_back(setter->getSelector());
+ }
+ Fields.push_back(MakeConstantString(setter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ Properties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+ }
+ llvm::ArrayType *PropertyArrayTy =
+ llvm::ArrayType::get(PropertyMetadataTy, Properties.size());
+ llvm::Constant *PropertyArray = llvm::ConstantArray::get(PropertyArrayTy,
+ Properties);
+ llvm::Constant* PropertyListInitFields[] =
+ {llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
+
+ llvm::Constant *PropertyListInit =
+ llvm::ConstantStruct::get(VMContext, PropertyListInitFields, 3, false);
+ return new llvm::GlobalVariable(TheModule, PropertyListInit->getType(), false,
+ llvm::GlobalValue::InternalLinkage, PropertyListInit,
+ ".objc_property_list");
+}
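+// The property list emitted above is roughly { int count; void *unused
+// (always NULL here); objc_property properties[count]; }, where each entry
+// carries the seven fields described in the comment at the top of this
+// function.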
+
+void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
+ ASTContext &Context = CGM.getContext();
+
+ // Get the superclass name.
+ const ObjCInterfaceDecl * SuperClassDecl =
+ OID->getClassInterface()->getSuperClass();
+ std::string SuperClassName;
+ if (SuperClassDecl) {
+ SuperClassName = SuperClassDecl->getNameAsString();
+ EmitClassRef(SuperClassName);
+ }
+
+ // Get the class name
+ ObjCInterfaceDecl *ClassDecl =
+ const_cast<ObjCInterfaceDecl *>(OID->getClassInterface());
+ std::string ClassName = ClassDecl->getNameAsString();
+ // Emit the symbol that is used to generate linker errors if this class is
+ // referenced in other modules but not declared.
+ std::string classSymbolName = "__objc_class_name_" + ClassName;
+ if (llvm::GlobalVariable *symbol =
+ TheModule.getGlobalVariable(classSymbolName)) {
+ symbol->setInitializer(llvm::ConstantInt::get(LongTy, 0));
+ } else {
+ new llvm::GlobalVariable(TheModule, LongTy, false,
+ llvm::GlobalValue::ExternalLinkage, llvm::ConstantInt::get(LongTy, 0),
+ classSymbolName);
+ }
+
+ // Get the size of instances.
+ int instanceSize = Context.getASTObjCImplementationLayout(OID).getSize() / 8;
+
+ // Collect information about instance variables.
+ llvm::SmallVector<llvm::Constant*, 16> IvarNames;
+ llvm::SmallVector<llvm::Constant*, 16> IvarTypes;
+ llvm::SmallVector<llvm::Constant*, 16> IvarOffsets;
+
+ std::vector<llvm::Constant*> IvarOffsetValues;
+
+ int superInstanceSize = !SuperClassDecl ? 0 :
+ Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize() / 8;
+ // For non-fragile ivars, set the instance size to 0 - {the size of just this
+ // class}. The runtime will then set this to the correct value on load.
+ if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+ instanceSize = 0 - (instanceSize - superInstanceSize);
+ }
+
+ // Collect declared and synthesized ivars.
+ llvm::SmallVector<ObjCIvarDecl*, 16> OIvars;
+ CGM.getContext().ShallowCollectObjCIvars(ClassDecl, OIvars);
+
+ for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
+ ObjCIvarDecl *IVD = OIvars[i];
+ // Store the name
+ IvarNames.push_back(MakeConstantString(IVD->getNameAsString()));
+ // Get the type encoding for this ivar
+ std::string TypeStr;
+ Context.getObjCEncodingForType(IVD->getType(), TypeStr);
+ IvarTypes.push_back(MakeConstantString(TypeStr));
+ // Get the offset
+ uint64_t BaseOffset = ComputeIvarBaseOffset(CGM, OID, IVD);
+ uint64_t Offset = BaseOffset;
+ if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+ Offset = BaseOffset - superInstanceSize;
+ }
+ IvarOffsets.push_back(
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Offset));
+ IvarOffsetValues.push_back(new llvm::GlobalVariable(TheModule, IntTy,
+ false, llvm::GlobalValue::ExternalLinkage,
+ llvm::ConstantInt::get(IntTy, BaseOffset),
+ "__objc_ivar_offset_value_" + ClassName +"." +
+ IVD->getNameAsString()));
+ }
+ llvm::Constant *IvarOffsetArrayInit =
+ llvm::ConstantArray::get(llvm::ArrayType::get(PtrToIntTy,
+ IvarOffsetValues.size()), IvarOffsetValues);
+ llvm::GlobalVariable *IvarOffsetArray = new llvm::GlobalVariable(TheModule,
+ IvarOffsetArrayInit->getType(), false,
+ llvm::GlobalValue::InternalLinkage, IvarOffsetArrayInit,
+ ".ivar.offsets");
+
+ // Collect information about instance methods
+ llvm::SmallVector<Selector, 16> InstanceMethodSels;
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ for (ObjCImplementationDecl::instmeth_iterator
+ iter = OID->instmeth_begin(), endIter = OID->instmeth_end();
+ iter != endIter ; iter++) {
+ InstanceMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+ InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+
+ llvm::Constant *Properties = GeneratePropertyList(OID, InstanceMethodSels,
+ InstanceMethodTypes);
+
+
+ // Collect information about class methods
+ llvm::SmallVector<Selector, 16> ClassMethodSels;
+ llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ for (ObjCImplementationDecl::classmeth_iterator
+ iter = OID->classmeth_begin(), endIter = OID->classmeth_end();
+ iter != endIter ; iter++) {
+ ClassMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+ ClassMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+ // Collect the names of referenced protocols
+ llvm::SmallVector<std::string, 16> Protocols;
+ const ObjCList<ObjCProtocolDecl> &Protos =ClassDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(),
+ E = Protos.end(); I != E; ++I)
+ Protocols.push_back((*I)->getNameAsString());
+
+
+
+ // Get the superclass pointer.
+ llvm::Constant *SuperClass;
+ if (!SuperClassName.empty()) {
+ SuperClass = MakeConstantString(SuperClassName, ".super_class_name");
+ } else {
+ SuperClass = llvm::ConstantPointerNull::get(PtrToInt8Ty);
+ }
+ // Empty vector used to construct empty method lists
+ llvm::SmallVector<llvm::Constant*, 1> empty;
+ // Generate the method and instance variable lists
+ llvm::Constant *MethodList = GenerateMethodList(ClassName, "",
+ InstanceMethodSels, InstanceMethodTypes, false);
+ llvm::Constant *ClassMethodList = GenerateMethodList(ClassName, "",
+ ClassMethodSels, ClassMethodTypes, true);
+ llvm::Constant *IvarList = GenerateIvarList(IvarNames, IvarTypes,
+ IvarOffsets);
+ // Irrespective of whether we are compiling for a fragile or non-fragile ABI,
+ // we emit a symbol containing the offset for each ivar in the class. This
+ // allows code compiled for the non-Fragile ABI to inherit from code compiled
+ // for the legacy ABI, without causing problems. The converse is also
+ // possible, but causes all ivar accesses to be fragile.
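+  // Illustrative sketch (hypothetical class and ivar names): for a class
+  // "Account" with an ivar "balance", the loop below emits a global named
+  //   __objc_ivar_offset_Account.balance
+  // whose initializer is a GEP to the offset field of that ivar's entry in
+  // the class's ivar list.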
+ int i = 0;
+ // Offset pointer for getting at the correct field in the ivar list when
+ // setting up the alias. These are: The base address for the global, the
+ // ivar array (second field), the ivar in this list (set for each ivar), and
+ // the offset (third field in ivar structure)
+ const llvm::Type *IndexTy = llvm::Type::getInt32Ty(VMContext);
+ llvm::Constant *offsetPointerIndexes[] = {Zeros[0],
+ llvm::ConstantInt::get(IndexTy, 1), 0,
+ llvm::ConstantInt::get(IndexTy, 2) };
+
+ for (ObjCInterfaceDecl::ivar_iterator iter = ClassDecl->ivar_begin(),
+ endIter = ClassDecl->ivar_end() ; iter != endIter ; iter++) {
+ const std::string Name = "__objc_ivar_offset_" + ClassName + '.'
+ +(*iter)->getNameAsString();
+ offsetPointerIndexes[2] = llvm::ConstantInt::get(IndexTy, i++);
+ // Get the correct ivar field
+ llvm::Constant *offsetValue = llvm::ConstantExpr::getGetElementPtr(
+ IvarList, offsetPointerIndexes, 4);
+ // Get the existing alias, if one exists.
+ llvm::GlobalVariable *offset = TheModule.getNamedGlobal(Name);
+ if (offset) {
+ offset->setInitializer(offsetValue);
+ // If this is the real definition, change its linkage type so that
+ // different modules will use this one, rather than their private
+ // copy.
+ offset->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ } else {
+ // Add a new alias if there isn't one already.
+ offset = new llvm::GlobalVariable(TheModule, offsetValue->getType(),
+ false, llvm::GlobalValue::ExternalLinkage, offsetValue, Name);
+ }
+ }
+  // Generate metaclass for class methods.
+ llvm::Constant *MetaClassStruct = GenerateClassStructure(NULLPtr,
+ NULLPtr, 0x12L, ClassName.c_str(), 0, Zeros[0], GenerateIvarList(
+ empty, empty, empty), ClassMethodList, NULLPtr, NULLPtr, NULLPtr, true);
+
+ // Generate the class structure
+ llvm::Constant *ClassStruct =
+ GenerateClassStructure(MetaClassStruct, SuperClass, 0x11L,
+ ClassName.c_str(), 0,
+ llvm::ConstantInt::get(LongTy, instanceSize), IvarList,
+ MethodList, GenerateProtocolList(Protocols), IvarOffsetArray,
+ Properties);
+
+ // Resolve the class aliases, if they exist.
+ if (ClassPtrAlias) {
+ ClassPtrAlias->setAliasee(
+ llvm::ConstantExpr::getBitCast(ClassStruct, IdTy));
+ ClassPtrAlias = 0;
+ }
+ if (MetaClassPtrAlias) {
+ MetaClassPtrAlias->setAliasee(
+ llvm::ConstantExpr::getBitCast(MetaClassStruct, IdTy));
+ MetaClassPtrAlias = 0;
+ }
+
+ // Add class structure to list to be added to the symtab later
+ ClassStruct = llvm::ConstantExpr::getBitCast(ClassStruct, PtrToInt8Ty);
+ Classes.push_back(ClassStruct);
+}
+
+
+llvm::Function *CGObjCGNU::ModuleInitFunction() {
+  // Only emit an ObjC load function if some Objective-C code or metadata was
+  // actually generated in this module.
+ if (Classes.empty() && Categories.empty() && ConstantStrings.empty() &&
+ ExistingProtocols.empty() && TypedSelectors.empty() &&
+ UntypedSelectors.empty())
+ return NULL;
+
+ // Add all referenced protocols to a category.
+ GenerateProtocolHolderCategory();
+
+ const llvm::StructType *SelStructTy = dyn_cast<llvm::StructType>(
+ SelectorTy->getElementType());
+ const llvm::Type *SelStructPtrTy = SelectorTy;
+ bool isSelOpaque = false;
+ if (SelStructTy == 0) {
+ SelStructTy = llvm::StructType::get(VMContext, PtrToInt8Ty,
+ PtrToInt8Ty, NULL);
+ SelStructPtrTy = llvm::PointerType::getUnqual(SelStructTy);
+ isSelOpaque = true;
+ }
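+  // When the runtime structure isn't visible, selectors are modelled here as
+  // a two-string struct; a rough sketch of the assumed layout:
+  //   struct objc_selector { const char *name; const char *types; };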
+
+ // Name the ObjC types to make the IR a bit easier to read
+ TheModule.addTypeName(".objc_selector", SelStructPtrTy);
+ TheModule.addTypeName(".objc_id", IdTy);
+ TheModule.addTypeName(".objc_imp", IMPTy);
+
+ std::vector<llvm::Constant*> Elements;
+ llvm::Constant *Statics = NULLPtr;
+ // Generate statics list:
+ if (ConstantStrings.size()) {
+ llvm::ArrayType *StaticsArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
+ ConstantStrings.size() + 1);
+ ConstantStrings.push_back(NULLPtr);
+
+ llvm::StringRef StringClass = CGM.getLangOptions().ObjCConstantStringClass;
+ if (StringClass.empty()) StringClass = "NXConstantString";
+ Elements.push_back(MakeConstantString(StringClass,
+ ".objc_static_class_name"));
+ Elements.push_back(llvm::ConstantArray::get(StaticsArrayTy,
+ ConstantStrings));
+ llvm::StructType *StaticsListTy =
+ llvm::StructType::get(VMContext, PtrToInt8Ty, StaticsArrayTy, NULL);
+ llvm::Type *StaticsListPtrTy =
+ llvm::PointerType::getUnqual(StaticsListTy);
+ Statics = MakeGlobal(StaticsListTy, Elements, ".objc_statics");
+ llvm::ArrayType *StaticsListArrayTy =
+ llvm::ArrayType::get(StaticsListPtrTy, 2);
+ Elements.clear();
+ Elements.push_back(Statics);
+ Elements.push_back(llvm::Constant::getNullValue(StaticsListPtrTy));
+ Statics = MakeGlobal(StaticsListArrayTy, Elements, ".objc_statics_ptr");
+ Statics = llvm::ConstantExpr::getBitCast(Statics, PtrTy);
+ }
+ // Array of classes, categories, and constant objects
+ llvm::ArrayType *ClassListTy = llvm::ArrayType::get(PtrToInt8Ty,
+ Classes.size() + Categories.size() + 2);
+ llvm::StructType *SymTabTy = llvm::StructType::get(VMContext,
+ LongTy, SelStructPtrTy,
+ llvm::Type::getInt16Ty(VMContext),
+ llvm::Type::getInt16Ty(VMContext),
+ ClassListTy, NULL);
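+  // Rough C equivalent of the symbol table emitted below (field names are
+  // illustrative; the GNU runtime defines the real struct):
+  //   struct objc_symtab {
+  //     long      sel_ref_cnt;   // number of static selectors
+  //     SEL      *refs;          // pointer to the selector list
+  //     uint16_t  cls_def_cnt;   // number of classes defined
+  //     uint16_t  cat_def_cnt;   // number of categories defined
+  //     void     *defs[];        // classes, categories, statics, NULL
+  //   };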
+
+ Elements.clear();
+ // Pointer to an array of selectors used in this module.
+ std::vector<llvm::Constant*> Selectors;
+ for (std::map<TypedSelector, llvm::GlobalAlias*>::iterator
+ iter = TypedSelectors.begin(), iterEnd = TypedSelectors.end();
+ iter != iterEnd ; ++iter) {
+ Elements.push_back(ExportUniqueString(iter->first.first, ".objc_sel_name"));
+ Elements.push_back(MakeConstantString(iter->first.second,
+ ".objc_sel_types"));
+ Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+ Elements.clear();
+ }
+ for (llvm::StringMap<llvm::GlobalAlias*>::iterator
+ iter = UntypedSelectors.begin(), iterEnd = UntypedSelectors.end();
+ iter != iterEnd; ++iter) {
+ Elements.push_back(
+ ExportUniqueString(iter->getKeyData(), ".objc_sel_name"));
+ Elements.push_back(NULLPtr);
+ Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+ Elements.clear();
+ }
+ Elements.push_back(NULLPtr);
+ Elements.push_back(NULLPtr);
+ Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+ Elements.clear();
+ // Number of static selectors
+ Elements.push_back(llvm::ConstantInt::get(LongTy, Selectors.size() ));
+ llvm::Constant *SelectorList = MakeGlobal(
+ llvm::ArrayType::get(SelStructTy, Selectors.size()), Selectors,
+ ".objc_selector_list");
+ Elements.push_back(llvm::ConstantExpr::getBitCast(SelectorList,
+ SelStructPtrTy));
+
+ // Now that all of the static selectors exist, create pointers to them.
+ int index = 0;
+ for (std::map<TypedSelector, llvm::GlobalAlias*>::iterator
+ iter=TypedSelectors.begin(), iterEnd =TypedSelectors.end();
+ iter != iterEnd; ++iter) {
+ llvm::Constant *Idxs[] = {Zeros[0],
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), index++), Zeros[0]};
+ llvm::Constant *SelPtr = new llvm::GlobalVariable(TheModule, SelStructPtrTy,
+ true, llvm::GlobalValue::LinkOnceODRLinkage,
+ llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
+ MangleSelectorTypes(".objc_sel_ptr"+iter->first.first+"."+
+ iter->first.second));
+ // If selectors are defined as an opaque type, cast the pointer to this
+ // type.
+ if (isSelOpaque) {
+ SelPtr = llvm::ConstantExpr::getBitCast(SelPtr,
+ llvm::PointerType::getUnqual(SelectorTy));
+ }
+ (*iter).second->replaceAllUsesWith(SelPtr);
+ (*iter).second->eraseFromParent();
+ }
+ for (llvm::StringMap<llvm::GlobalAlias*>::iterator
+ iter=UntypedSelectors.begin(), iterEnd = UntypedSelectors.end();
+ iter != iterEnd; iter++) {
+ llvm::Constant *Idxs[] = {Zeros[0],
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), index++), Zeros[0]};
+ llvm::Constant *SelPtr = new llvm::GlobalVariable(TheModule, SelStructPtrTy,
+ true, llvm::GlobalValue::LinkOnceODRLinkage,
+ llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
+ MangleSelectorTypes(std::string(".objc_sel_ptr")+iter->getKey().str()));
+ // If selectors are defined as an opaque type, cast the pointer to this
+ // type.
+ if (isSelOpaque) {
+ SelPtr = llvm::ConstantExpr::getBitCast(SelPtr,
+ llvm::PointerType::getUnqual(SelectorTy));
+ }
+ (*iter).second->replaceAllUsesWith(SelPtr);
+ (*iter).second->eraseFromParent();
+ }
+ // Number of classes defined.
+ Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
+ Classes.size()));
+ // Number of categories defined
+ Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
+ Categories.size()));
+ // Create an array of classes, then categories, then static object instances
+ Classes.insert(Classes.end(), Categories.begin(), Categories.end());
+ // NULL-terminated list of static object instances (mainly constant strings)
+ Classes.push_back(Statics);
+ Classes.push_back(NULLPtr);
+ llvm::Constant *ClassList = llvm::ConstantArray::get(ClassListTy, Classes);
+ Elements.push_back(ClassList);
+ // Construct the symbol table
+ llvm::Constant *SymTab= MakeGlobal(SymTabTy, Elements);
+
+ // The symbol table is contained in a module which has some version-checking
+ // constants
+ llvm::StructType * ModuleTy = llvm::StructType::get(VMContext, LongTy, LongTy,
+ PtrToInt8Ty, llvm::PointerType::getUnqual(SymTabTy), NULL);
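+  // Rough C equivalent of the module structure (field names illustrative):
+  //   struct objc_module {
+  //     long version;               // runtime compatibility version
+  //     long size;                  // sizeof(struct objc_module)
+  //     const char *name;           // source path; currently NULL (see FIXME)
+  //     struct objc_symtab *symtab; // the symbol table built above
+  //   };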
+ Elements.clear();
+ // Runtime version used for compatibility checking.
+ if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+ Elements.push_back(llvm::ConstantInt::get(LongTy,
+ NonFragileRuntimeVersion));
+ } else {
+ Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion));
+ }
+ // sizeof(ModuleTy)
+ llvm::TargetData td(&TheModule);
+ Elements.push_back(llvm::ConstantInt::get(LongTy,
+ td.getTypeSizeInBits(ModuleTy)/8));
+  // FIXME: Should be the path to the file where this module was declared.
+ Elements.push_back(NULLPtr);
+ Elements.push_back(SymTab);
+ llvm::Value *Module = MakeGlobal(ModuleTy, Elements);
+
+ // Create the load function calling the runtime entry point with the module
+ // structure
+ llvm::Function * LoadFunction = llvm::Function::Create(
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), false),
+ llvm::GlobalValue::InternalLinkage, ".objc_load_function",
+ &TheModule);
+ llvm::BasicBlock *EntryBB =
+ llvm::BasicBlock::Create(VMContext, "entry", LoadFunction);
+ CGBuilderTy Builder(VMContext);
+ Builder.SetInsertPoint(EntryBB);
+
+ std::vector<const llvm::Type*> Params(1,
+ llvm::PointerType::getUnqual(ModuleTy));
+ llvm::Value *Register = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::Type::getVoidTy(VMContext), Params, true), "__objc_exec_class");
+ Builder.CreateCall(Register, Module);
+ Builder.CreateRetVoid();
+
+ return LoadFunction;
+}
+
+llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) {
+ const ObjCCategoryImplDecl *OCD =
+ dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ std::string CategoryName = OCD ? OCD->getNameAsString() : "";
+ std::string ClassName = CD->getName();
+ std::string MethodName = OMD->getSelector().getAsString();
+ bool isClassMethod = !OMD->isInstanceMethod();
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *MethodTy =
+ Types.GetFunctionType(Types.getFunctionInfo(OMD), OMD->isVariadic());
+ std::string FunctionName = SymbolNameForMethod(ClassName, CategoryName,
+ MethodName, isClassMethod);
+
+ llvm::Function *Method
+ = llvm::Function::Create(MethodTy,
+ llvm::GlobalValue::InternalLinkage,
+ FunctionName,
+ &TheModule);
+ return Method;
+}
+
+llvm::Function *CGObjCGNU::GetPropertyGetFunction() {
+ std::vector<const llvm::Type*> Params;
+ const llvm::Type *BoolTy =
+ CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
+ Params.push_back(IdTy);
+ Params.push_back(SelectorTy);
+ Params.push_back(IntTy);
+ Params.push_back(BoolTy);
+  // id objc_getProperty (id, SEL, int, bool)
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(IdTy, Params, false);
+ return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
+ "objc_getProperty"));
+}
+
+llvm::Function *CGObjCGNU::GetPropertySetFunction() {
+ std::vector<const llvm::Type*> Params;
+ const llvm::Type *BoolTy =
+ CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
+ Params.push_back(IdTy);
+ Params.push_back(SelectorTy);
+ Params.push_back(IntTy);
+ Params.push_back(IdTy);
+ Params.push_back(BoolTy);
+ Params.push_back(BoolTy);
+ // void objc_setProperty (id, SEL, int, id, bool, bool)
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false);
+ return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
+ "objc_setProperty"));
+}
+
+// FIXME. Implement this.
+llvm::Function *CGObjCGNU::GetCopyStructFunction() {
+ return 0;
+}
+
+llvm::Constant *CGObjCGNU::EnumerationMutationFunction() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_enumerationMutation (id)
+ llvm::SmallVector<CanQualType,1> Params;
+ Params.push_back(ASTIdTy);
+ const llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo()), false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
+}
+
+namespace {
+ struct CallSyncExit : EHScopeStack::Cleanup {
+ llvm::Value *SyncExitFn;
+ llvm::Value *SyncArg;
+ CallSyncExit(llvm::Value *SyncExitFn, llvm::Value *SyncArg)
+ : SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
+ CGF.Builder.CreateCall(SyncExitFn, SyncArg)->setDoesNotThrow();
+ }
+ };
+}
+
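+// A hedged sketch of the lowering performed below for
+//   @synchronized(obj) { body }
+// using the runtime entry points referenced in this function:
+//   objc_sync_enter(obj);
+//   body;                 // an all-paths cleanup guarantees that
+//   objc_sync_exit(obj);  // the lock is released even on unwind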
+void CGObjCGNU::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) {
+ std::vector<const llvm::Type*> Args(1, IdTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+
+ // Evaluate the lock operand. This should dominate the cleanup.
+ llvm::Value *SyncArg =
+ CGF.EmitScalarExpr(S.getSynchExpr());
+
+ // Acquire the lock.
+ llvm::Value *SyncEnter = CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
+ CGF.Builder.CreateCall(SyncEnter, SyncArg);
+
+ // Register an all-paths cleanup to release the lock.
+ llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
+ CGF.EHStack.pushCleanup<CallSyncExit>(NormalAndEHCleanup, SyncExit, SyncArg);
+
+ // Emit the body of the statement.
+ CGF.EmitStmt(S.getSynchBody());
+
+ // Pop the lock-release cleanup.
+ CGF.PopCleanupBlock();
+}
+
+namespace {
+ struct CatchHandler {
+ const VarDecl *Variable;
+ const Stmt *Body;
+ llvm::BasicBlock *Block;
+ llvm::Value *TypeInfo;
+ };
+}
+
+void CGObjCGNU::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) {
+  // Unlike the Apple non-fragile runtimes, which also use
+  // unwind-based zero-cost exceptions, the GNU Objective-C runtime's
+ // EH support isn't a veneer over C++ EH. Instead, exception
+ // objects are created by __objc_exception_throw and destroyed by
+ // the personality function; this avoids the need for bracketing
+ // catch handlers with calls to __blah_begin_catch/__blah_end_catch
+ // (or even _Unwind_DeleteException), but probably doesn't
+ // interoperate very well with foreign exceptions.
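+  // For orientation, a minimal example of the construct handled here
+  // (hypothetical class name):
+  //   @try     { [account withdraw:amount]; }
+  //   @catch (Overdraft *e) { ... }  // matched below by class-name string
+  //   @catch (id e)         { ... }  // becomes a catch-all handler
+  //   @finally { ... }               // pushed as a cleanup before the catch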
+
+ // Jump destination for falling out of catch bodies.
+ CodeGenFunction::JumpDest Cont;
+ if (S.getNumCatchStmts())
+ Cont = CGF.getJumpDestInCurrentScope("eh.cont");
+
+ // We handle @finally statements by pushing them as a cleanup
+ // before entering the catch.
+ CodeGenFunction::FinallyInfo FinallyInfo;
+ if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt()) {
+ std::vector<const llvm::Type*> Args(1, IdTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ llvm::Constant *Rethrow =
+ CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
+
+ FinallyInfo = CGF.EnterFinallyBlock(Finally->getFinallyBody(), 0, 0,
+ Rethrow);
+ }
+
+ llvm::SmallVector<CatchHandler, 8> Handlers;
+
+ // Enter the catch, if there is one.
+ if (S.getNumCatchStmts()) {
+ for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
+ const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
+
+ Handlers.push_back(CatchHandler());
+ CatchHandler &Handler = Handlers.back();
+ Handler.Variable = CatchDecl;
+ Handler.Body = CatchStmt->getCatchBody();
+ Handler.Block = CGF.createBasicBlock("catch");
+
+ // @catch() and @catch(id) both catch any ObjC exception.
+ // Treat them as catch-alls.
+ // FIXME: this is what this code was doing before, but should 'id'
+ // really be catching foreign exceptions?
+ if (!CatchDecl
+ || CatchDecl->getType()->isObjCIdType()
+ || CatchDecl->getType()->isObjCQualifiedIdType()) {
+
+ Handler.TypeInfo = 0; // catch-all
+
+ // Don't consider any other catches.
+ break;
+ }
+
+ // All other types should be Objective-C interface pointer types.
+ const ObjCObjectPointerType *OPT =
+ CatchDecl->getType()->getAs<ObjCObjectPointerType>();
+ assert(OPT && "Invalid @catch type.");
+ const ObjCInterfaceDecl *IDecl =
+ OPT->getObjectType()->getInterface();
+ assert(IDecl && "Invalid @catch type.");
+ Handler.TypeInfo = MakeConstantString(IDecl->getNameAsString());
+ }
+
+ EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
+ Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
+ }
+
+ // Emit the try body.
+ CGF.EmitStmt(S.getTryBody());
+
+ // Leave the try.
+ if (S.getNumCatchStmts())
+ CGF.EHStack.popCatch();
+
+ // Remember where we were.
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
+
+ // Emit the handlers.
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
+ CatchHandler &Handler = Handlers[I];
+ CGF.EmitBlock(Handler.Block);
+
+ llvm::Value *Exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot());
+
+ // Bind the catch parameter if it exists.
+ if (const VarDecl *CatchParam = Handler.Variable) {
+ const llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
+ Exn = CGF.Builder.CreateBitCast(Exn, CatchType);
+
+ CGF.EmitLocalBlockVarDecl(*CatchParam);
+ CGF.Builder.CreateStore(Exn, CGF.GetAddrOfLocalVar(CatchParam));
+ }
+
+ CGF.ObjCEHValueStack.push_back(Exn);
+ CGF.EmitStmt(Handler.Body);
+ CGF.ObjCEHValueStack.pop_back();
+
+ CGF.EmitBranchThroughCleanup(Cont);
+ }
+
+ // Go back to the try-statement fallthrough.
+ CGF.Builder.restoreIP(SavedIP);
+
+ // Pop out of the finally.
+ if (S.getFinallyStmt())
+ CGF.ExitFinallyBlock(FinallyInfo);
+
+ if (Cont.isValid()) {
+ if (Cont.getBlock()->use_empty())
+ delete Cont.getBlock();
+ else
+ CGF.EmitBlock(Cont.getBlock());
+ }
+}
+
+void CGObjCGNU::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) {
+ llvm::Value *ExceptionAsObject;
+
+ std::vector<const llvm::Type*> Args(1, IdTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ llvm::Value *ThrowFn =
+ CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
+
+ if (const Expr *ThrowExpr = S.getThrowExpr()) {
+ llvm::Value *Exception = CGF.EmitScalarExpr(ThrowExpr);
+ ExceptionAsObject = Exception;
+ } else {
+ assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+ "Unexpected rethrow outside @catch block.");
+ ExceptionAsObject = CGF.ObjCEHValueStack.back();
+ }
+ ExceptionAsObject =
+ CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy, "tmp");
+
+ // Note: This may have to be an invoke, if we want to support constructs like:
+ // @try {
+ // @throw(obj);
+ // }
+ // @catch(id) ...
+ //
+ // This is effectively turning @throw into an incredibly-expensive goto, but
+ // it may happen as a result of inlining followed by missed optimizations, or
+ // as a result of stupidity.
+ llvm::BasicBlock *UnwindBB = CGF.getInvokeDest();
+ if (!UnwindBB) {
+ CGF.Builder.CreateCall(ThrowFn, ExceptionAsObject);
+ CGF.Builder.CreateUnreachable();
+ } else {
+ CGF.Builder.CreateInvoke(ThrowFn, UnwindBB, UnwindBB, &ExceptionAsObject,
+ &ExceptionAsObject+1);
+ }
+ // Clear the insertion point to indicate we are in unreachable code.
+ CGF.Builder.ClearInsertionPoint();
+}
+
+llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj) {
+ CGBuilderTy B = CGF.Builder;
+ AddrWeakObj = EnforceType(B, AddrWeakObj, IdTy);
+ return B.CreateCall(WeakReadFn, AddrWeakObj);
+}
+
+void CGObjCGNU::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ CGBuilderTy B = CGF.Builder;
+ src = EnforceType(B, src, IdTy);
+ dst = EnforceType(B, dst, PtrToIdTy);
+ B.CreateCall2(WeakAssignFn, src, dst);
+}
+
+void CGObjCGNU::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst,
+ bool threadlocal) {
+ CGBuilderTy B = CGF.Builder;
+ src = EnforceType(B, src, IdTy);
+ dst = EnforceType(B, dst, PtrToIdTy);
+ if (!threadlocal)
+ B.CreateCall2(GlobalAssignFn, src, dst);
+ else
+    // FIXME: Add thread-local assign API
+    assert(false && "EmitObjCGlobalAssign - Thread Local API NYI");
+}
+
+void CGObjCGNU::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst,
+ llvm::Value *ivarOffset) {
+ CGBuilderTy B = CGF.Builder;
+ src = EnforceType(B, src, IdTy);
+ dst = EnforceType(B, dst, PtrToIdTy);
+ B.CreateCall3(IvarAssignFn, src, dst, ivarOffset);
+}
+
+void CGObjCGNU::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ CGBuilderTy B = CGF.Builder;
+ src = EnforceType(B, src, IdTy);
+ dst = EnforceType(B, dst, PtrToIdTy);
+ B.CreateCall2(StrongCastAssignFn, src, dst);
+}
+
+void CGObjCGNU::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ llvm::Value *Size) {
+ CGBuilderTy B = CGF.Builder;
+ DestPtr = EnforceType(B, DestPtr, IdTy);
+ SrcPtr = EnforceType(B, SrcPtr, PtrToIdTy);
+
+ B.CreateCall3(MemMoveFn, DestPtr, SrcPtr, Size);
+}
+
+llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ const std::string Name = "__objc_ivar_offset_" + ID->getNameAsString()
+ + '.' + Ivar->getNameAsString();
+ // Emit the variable and initialize it with what we think the correct value
+ // is. This allows code compiled with non-fragile ivars to work correctly
+ // when linked against code which isn't (most of the time).
+ llvm::GlobalVariable *IvarOffsetPointer = TheModule.getNamedGlobal(Name);
+ if (!IvarOffsetPointer) {
+ uint64_t Offset;
+ if (ObjCImplementationDecl *OID =
+ CGM.getContext().getObjCImplementation(
+ const_cast<ObjCInterfaceDecl *>(ID)))
+ Offset = ComputeIvarBaseOffset(CGM, OID, Ivar);
+ else
+ Offset = ComputeIvarBaseOffset(CGM, ID, Ivar);
+
+ llvm::ConstantInt *OffsetGuess =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Offset, "ivar");
+ // Don't emit the guess in non-PIC code because the linker will not be able
+ // to replace it with the real version for a library. In non-PIC code you
+ // must compile with the fragile ABI if you want to use ivars from a
+ // GCC-compiled class.
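+    // Illustrative shape of the emitted globals (hypothetical ivar; exact IR
+    // syntax may differ):
+    //   PIC:     @"__objc_ivar_offset_Account.balance.guess" = private global i32 <guess>
+    //            @"__objc_ivar_offset_Account.balance"       = linkonce global i32* <guess var>
+    //   non-PIC: @"__objc_ivar_offset_Account.balance"       = external global i32*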
+ if (CGM.getLangOptions().PICLevel) {
+ llvm::GlobalVariable *IvarOffsetGV = new llvm::GlobalVariable(TheModule,
+ llvm::Type::getInt32Ty(VMContext), false,
+ llvm::GlobalValue::PrivateLinkage, OffsetGuess, Name+".guess");
+ IvarOffsetPointer = new llvm::GlobalVariable(TheModule,
+ IvarOffsetGV->getType(), false, llvm::GlobalValue::LinkOnceAnyLinkage,
+ IvarOffsetGV, Name);
+ } else {
+ IvarOffsetPointer = new llvm::GlobalVariable(TheModule,
+ llvm::Type::getInt32PtrTy(VMContext), false,
+ llvm::GlobalValue::ExternalLinkage, 0, Name);
+ }
+ }
+ return IvarOffsetPointer;
+}
+
+LValue CGObjCGNU::EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ const ObjCInterfaceDecl *ID =
+ ObjectTy->getAs<ObjCObjectType>()->getInterface();
+ return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+ EmitIvarOffset(CGF, ID, Ivar));
+}
+
+static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *OIVD) {
+ llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
+ Context.ShallowCollectObjCIvars(OID, Ivars);
+ for (unsigned k = 0, e = Ivars.size(); k != e; ++k) {
+ if (OIVD == Ivars[k])
+ return OID;
+ }
+
+ // Otherwise check in the super class.
+ if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
+ return FindIvarInterface(Context, Super, OIVD);
+
+ return 0;
+}
+
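+// A brief sketch of the two strategies implemented below:
+//   non-fragile ABI: offset = **__objc_ivar_offset_<Class>.<ivar>  (a run-time
+//                    load through the indirection variable emitted above)
+//   fragile ABI:     offset = <compile-time constant from the AST layout>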
+llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ if (CGM.getLangOptions().ObjCNonFragileABI) {
+ Interface = FindIvarInterface(CGM.getContext(), Interface, Ivar);
+ return CGF.Builder.CreateLoad(CGF.Builder.CreateLoad(
+ ObjCIvarOffsetVariable(Interface, Ivar), false, "ivar"));
+ }
+ uint64_t Offset = ComputeIvarBaseOffset(CGF.CGM, Interface, Ivar);
+ return llvm::ConstantInt::get(LongTy, Offset, "ivar");
+}
+
+CodeGen::CGObjCRuntime *
+CodeGen::CreateGNUObjCRuntime(CodeGen::CodeGenModule &CGM) {
+ return new CGObjCGNU(CGM);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
new file mode 100644
index 0000000..54d0ff2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
@@ -0,0 +1,6236 @@
+//===------- CGObjCMac.cpp - Interface to Apple Objective-C Runtime -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides Objective-C code generation targeting the Apple runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+
+#include "CGRecordLayout.h"
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "CGException.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
+
+#include "llvm/InlineAsm.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetData.h"
+#include <cstdio>
+
+using namespace clang;
+using namespace CodeGen;
+
+// Common CGObjCRuntime functions. These don't belong here, but they
+// don't belong in CGObjCRuntime either, so we will live with it for
+// now.
+
+static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCImplementationDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ const ObjCInterfaceDecl *Container = Ivar->getContainingInterface();
+
+ // FIXME: We should eliminate the need to have ObjCImplementationDecl passed
+ // in here; it should never be necessary because that should be the lexical
+ // decl context for the ivar.
+
+  // If we know we have an implementation (and the ivar is in it) then
+ // look up in the implementation layout.
+ const ASTRecordLayout *RL;
+ if (ID && ID->getClassInterface() == Container)
+ RL = &CGM.getContext().getASTObjCImplementationLayout(ID);
+ else
+ RL = &CGM.getContext().getASTObjCInterfaceLayout(Container);
+
+ // Compute field index.
+ //
+ // FIXME: The index here is closely tied to how ASTContext::getObjCLayout is
+ // implemented. This should be fixed to get the information from the layout
+ // directly.
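+  // E.g. (hypothetical interface) for "@interface A { int a; int b; }", asking
+  // for ivar 'b' leaves Index == 1, and the bit offset is then read from the
+  // record layout via getFieldOffset(Index).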
+ unsigned Index = 0;
+ llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
+ CGM.getContext().ShallowCollectObjCIvars(Container, Ivars);
+ for (unsigned k = 0, e = Ivars.size(); k != e; ++k) {
+ if (Ivar == Ivars[k])
+ break;
+ ++Index;
+ }
+ assert(Index != Ivars.size() && "Ivar is not inside container!");
+
+ return RL->getFieldOffset(Index);
+}
+
+uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *Ivar) {
+ return LookupFieldBitOffset(CGM, OID, 0, Ivar) / 8;
+}
+
+uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCImplementationDecl *OID,
+ const ObjCIvarDecl *Ivar) {
+ return LookupFieldBitOffset(CGM, OID->getClassInterface(), OID, Ivar) / 8;
+}
+
+LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *OID,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers,
+ llvm::Value *Offset) {
+ // Compute (type*) ( (char *) BaseValue + Offset)
+ const llvm::Type *I8Ptr = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ QualType IvarTy = Ivar->getType();
+ const llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
+ llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, I8Ptr);
+ V = CGF.Builder.CreateGEP(V, Offset, "add.ptr");
+ V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
+
+ if (!Ivar->isBitField()) {
+ LValue LV = CGF.MakeAddrLValue(V, IvarTy);
+ LV.getQuals().addCVRQualifiers(CVRQualifiers);
+ return LV;
+ }
+
+ // We need to compute an access strategy for this bit-field. We are given the
+ // offset to the first byte in the bit-field, the sub-byte offset is taken
+ // from the original layout. We reuse the normal bit-field access strategy by
+ // treating this as an access to a struct where the bit-field is in byte 0,
+ // and adjust the containing type size as appropriate.
+ //
+ // FIXME: Note that currently we make a very conservative estimate of the
+  // alignment of the bit-field, because (a) it is not clear what guarantees
+  // the runtime makes for us, and (b) we don't have a way to specify that the
+  // struct is at an alignment plus offset.
+ //
+ // Note, there is a subtle invariant here: we can only call this routine on
+ // non-synthesized ivars but we may be called for synthesized ivars. However,
+ // a synthesized ivar can never be a bit-field, so this is safe.
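+  // Worked example (hypothetical numbers): for a bit-field ivar at bit offset
+  // 37 in a 128-bit layout, BitOffset = 37 % 8 = 5 and ContainingTypeSize =
+  // 128 - (37 - 5) = 96, i.e. the access is modelled as if the bit-field sat
+  // 5 bits into byte 0 of a struct that starts at byte 4 of the object.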
+ const ASTRecordLayout &RL =
+ CGF.CGM.getContext().getASTObjCInterfaceLayout(OID);
+ uint64_t TypeSizeInBits = RL.getSize();
+ uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar);
+ uint64_t BitOffset = FieldBitOffset % 8;
+ uint64_t ContainingTypeAlign = 8;
+ uint64_t ContainingTypeSize = TypeSizeInBits - (FieldBitOffset - BitOffset);
+ uint64_t BitFieldSize =
+ Ivar->getBitWidth()->EvaluateAsInt(CGF.getContext()).getZExtValue();
+
+ // Allocate a new CGBitFieldInfo object to describe this access.
+ //
+ // FIXME: This is incredibly wasteful, these should be uniqued or part of some
+ // layout object. However, this is blocked on other cleanups to the
+ // Objective-C code, so for now we just live with allocating a bunch of these
+ // objects.
+ CGBitFieldInfo *Info = new (CGF.CGM.getContext()) CGBitFieldInfo(
+ CGBitFieldInfo::MakeInfo(CGF.CGM.getTypes(), Ivar, BitOffset, BitFieldSize,
+ ContainingTypeSize, ContainingTypeAlign));
+
+ return LValue::MakeBitfield(V, *Info,
+ IvarTy.getCVRQualifiers() | CVRQualifiers);
+}
+
+///
+
+namespace {
+
+typedef std::vector<llvm::Constant*> ConstantVector;
+
+// FIXME: We should find a nicer way to make the labels for metadata; string
+// concatenation is lame.
+
+class ObjCCommonTypesHelper {
+protected:
+ llvm::LLVMContext &VMContext;
+
+private:
+ llvm::Constant *getMessageSendFn() const {
+ // id objc_msgSend (id, SEL, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSend");
+ }
+
+ llvm::Constant *getMessageSendStretFn() const {
+ // id objc_msgSend_stret (id, SEL, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ Params, true),
+ "objc_msgSend_stret");
+
+ }
+
+ llvm::Constant *getMessageSendFpretFn() const {
+ // FIXME: This should be long double on x86_64?
+ // [double | long double] objc_msgSend_fpret(id self, SEL op, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::Type::getDoubleTy(VMContext),
+ Params,
+ true),
+ "objc_msgSend_fpret");
+
+ }
+
+ llvm::Constant *getMessageSendSuperFn() const {
+ // id objc_msgSendSuper(struct objc_super *super, SEL op, ...)
+ const char *SuperName = "objc_msgSendSuper";
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ SuperName);
+ }
+
+ llvm::Constant *getMessageSendSuperFn2() const {
+ // id objc_msgSendSuper2(struct objc_super *super, SEL op, ...)
+ const char *SuperName = "objc_msgSendSuper2";
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ SuperName);
+ }
+
+ llvm::Constant *getMessageSendSuperStretFn() const {
+ // void objc_msgSendSuper_stret(void * stretAddr, struct objc_super *super,
+ // SEL op, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(Int8PtrTy);
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ Params, true),
+ "objc_msgSendSuper_stret");
+ }
+
+ llvm::Constant *getMessageSendSuperStretFn2() const {
+ // void objc_msgSendSuper2_stret(void * stretAddr, struct objc_super *super,
+ // SEL op, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(Int8PtrTy);
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ Params, true),
+ "objc_msgSendSuper2_stret");
+ }
+
+ llvm::Constant *getMessageSendSuperFpretFn() const {
+ // There is no objc_msgSendSuper_fpret? How can that work?
+ return getMessageSendSuperFn();
+ }
+
+ llvm::Constant *getMessageSendSuperFpretFn2() const {
+ // There is no objc_msgSendSuper_fpret? How can that work?
+ return getMessageSendSuperFn2();
+ }
+
+protected:
+ CodeGen::CodeGenModule &CGM;
+
+public:
+ const llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy;
+ const llvm::Type *Int8PtrTy;
+
+ /// ObjectPtrTy - LLVM type for object handles (typeof(id))
+ const llvm::Type *ObjectPtrTy;
+
+ /// PtrObjectPtrTy - LLVM type for id *
+ const llvm::Type *PtrObjectPtrTy;
+
+ /// SelectorPtrTy - LLVM type for selector handles (typeof(SEL))
+ const llvm::Type *SelectorPtrTy;
+  /// ExternalProtocolPtrTy - LLVM type for external protocol handles
+ /// (typeof(Protocol))
+ const llvm::Type *ExternalProtocolPtrTy;
+
+ // SuperCTy - clang type for struct objc_super.
+ QualType SuperCTy;
+ // SuperPtrCTy - clang type for struct objc_super *.
+ QualType SuperPtrCTy;
+
+ /// SuperTy - LLVM type for struct objc_super.
+ const llvm::StructType *SuperTy;
+ /// SuperPtrTy - LLVM type for struct objc_super *.
+ const llvm::Type *SuperPtrTy;
+
+ /// PropertyTy - LLVM type for struct objc_property (struct _prop_t
+ /// in GCC parlance).
+ const llvm::StructType *PropertyTy;
+
+ /// PropertyListTy - LLVM type for struct objc_property_list
+ /// (_prop_list_t in GCC parlance).
+ const llvm::StructType *PropertyListTy;
+ /// PropertyListPtrTy - LLVM type for struct objc_property_list*.
+ const llvm::Type *PropertyListPtrTy;
+
+ // MethodTy - LLVM type for struct objc_method.
+ const llvm::StructType *MethodTy;
+
+ /// CacheTy - LLVM type for struct objc_cache.
+ const llvm::Type *CacheTy;
+ /// CachePtrTy - LLVM type for struct objc_cache *.
+ const llvm::Type *CachePtrTy;
+
+ llvm::Constant *getGetPropertyFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // id objc_getProperty (id, SEL, ptrdiff_t, bool)
+ llvm::SmallVector<CanQualType,4> Params;
+ CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType());
+ CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
+ Params.push_back(IdType);
+ Params.push_back(SelType);
+ Params.push_back(Ctx.LongTy);
+ Params.push_back(Ctx.BoolTy);
+ const llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.getFunctionInfo(IdType, Params,
+ FunctionType::ExtInfo()),
+ false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_getProperty");
+ }
+
+ llvm::Constant *getSetPropertyFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)
+ llvm::SmallVector<CanQualType,6> Params;
+ CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType());
+ CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
+ Params.push_back(IdType);
+ Params.push_back(SelType);
+ Params.push_back(Ctx.LongTy);
+ Params.push_back(IdType);
+ Params.push_back(Ctx.BoolTy);
+ Params.push_back(Ctx.BoolTy);
+ const llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo()),
+ false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_setProperty");
+ }
+
+
+ llvm::Constant *getCopyStructFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_copyStruct (void *, const void *, size_t, bool, bool)
+ llvm::SmallVector<CanQualType,5> Params;
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.LongTy);
+ Params.push_back(Ctx.BoolTy);
+ Params.push_back(Ctx.BoolTy);
+ const llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo()),
+ false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_copyStruct");
+ }
+
+ llvm::Constant *getEnumerationMutationFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_enumerationMutation (id)
+ llvm::SmallVector<CanQualType,1> Params;
+ Params.push_back(Ctx.getCanonicalParamType(Ctx.getObjCIdType()));
+ const llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo()),
+ false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
+ }
+
+ /// GcReadWeakFn -- LLVM objc_read_weak (id *src) function.
+ llvm::Constant *getGcReadWeakFn() {
+ // id objc_read_weak (id *)
+ std::vector<const llvm::Type*> Args;
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_read_weak");
+ }
+
+ /// GcAssignWeakFn -- LLVM objc_assign_weak function.
+ llvm::Constant *getGcAssignWeakFn() {
+ // id objc_assign_weak (id, id *)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");
+ }
+
+ /// GcAssignGlobalFn -- LLVM objc_assign_global function.
+ llvm::Constant *getGcAssignGlobalFn() {
+ // id objc_assign_global(id, id *)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
+ }
+
+ /// GcAssignThreadLocalFn -- LLVM objc_assign_threadlocal function.
+ llvm::Constant *getGcAssignThreadLocalFn() {
+ // id objc_assign_threadlocal(id src, id * dest)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_threadlocal");
+ }
+
+ /// GcAssignIvarFn -- LLVM objc_assign_ivar function.
+ llvm::Constant *getGcAssignIvarFn() {
+ // id objc_assign_ivar(id, id *, ptrdiff_t)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ Args.push_back(LongTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
+ }
+
+ /// GcMemmoveCollectableFn -- LLVM objc_memmove_collectable function.
+ llvm::Constant *GcMemmoveCollectableFn() {
+ // void *objc_memmove_collectable(void *dst, const void *src, size_t size)
+ std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+ Args.push_back(Int8PtrTy);
+ Args.push_back(LongTy);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_memmove_collectable");
+ }
+
+ /// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function.
+ llvm::Constant *getGcAssignStrongCastFn() {
+ // id objc_assign_strongCast(id, id *)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast");
+ }
+
+ /// ExceptionThrowFn - LLVM objc_exception_throw function.
+ llvm::Constant *getExceptionThrowFn() {
+ // void objc_exception_throw(id)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
+ }
+
+ /// ExceptionRethrowFn - LLVM objc_exception_rethrow function.
+ llvm::Constant *getExceptionRethrowFn() {
+ // void objc_exception_rethrow(void)
+ std::vector<const llvm::Type*> Args;
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, true);
+ return CGM.CreateRuntimeFunction(FTy, "objc_exception_rethrow");
+ }
+
+  /// SyncEnterFn - LLVM objc_sync_enter function.
+ llvm::Constant *getSyncEnterFn() {
+ // void objc_sync_enter (id)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
+ }
+
+  /// SyncExitFn - LLVM objc_sync_exit function.
+ llvm::Constant *getSyncExitFn() {
+ // void objc_sync_exit (id)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
+ }
+
+ llvm::Constant *getSendFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn() : getMessageSendFn();
+ }
+
+ llvm::Constant *getSendFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn2() : getMessageSendFn();
+ }
+
+ llvm::Constant *getSendStretFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperStretFn() : getMessageSendStretFn();
+ }
+
+ llvm::Constant *getSendStretFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperStretFn2() : getMessageSendStretFn();
+ }
+
+ llvm::Constant *getSendFpretFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFpretFn() : getMessageSendFpretFn();
+ }
+
+ llvm::Constant *getSendFpretFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFpretFn2() : getMessageSendFpretFn();
+ }
+
+ ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm);
+ ~ObjCCommonTypesHelper(){}
+};
+
+/// ObjCTypesHelper - Helper class that encapsulates lazy
+/// construction of various types used during ObjC generation.
+class ObjCTypesHelper : public ObjCCommonTypesHelper {
+public:
+ /// SymtabTy - LLVM type for struct objc_symtab.
+ const llvm::StructType *SymtabTy;
+ /// SymtabPtrTy - LLVM type for struct objc_symtab *.
+ const llvm::Type *SymtabPtrTy;
+ /// ModuleTy - LLVM type for struct objc_module.
+ const llvm::StructType *ModuleTy;
+
+ /// ProtocolTy - LLVM type for struct objc_protocol.
+ const llvm::StructType *ProtocolTy;
+ /// ProtocolPtrTy - LLVM type for struct objc_protocol *.
+ const llvm::Type *ProtocolPtrTy;
+ /// ProtocolExtensionTy - LLVM type for struct
+ /// objc_protocol_extension.
+ const llvm::StructType *ProtocolExtensionTy;
+  /// ProtocolExtensionPtrTy - LLVM type for struct
+ /// objc_protocol_extension *.
+ const llvm::Type *ProtocolExtensionPtrTy;
+ /// MethodDescriptionTy - LLVM type for struct
+ /// objc_method_description.
+ const llvm::StructType *MethodDescriptionTy;
+ /// MethodDescriptionListTy - LLVM type for struct
+ /// objc_method_description_list.
+ const llvm::StructType *MethodDescriptionListTy;
+ /// MethodDescriptionListPtrTy - LLVM type for struct
+ /// objc_method_description_list *.
+ const llvm::Type *MethodDescriptionListPtrTy;
+  /// ProtocolListTy - LLVM type for struct objc_protocol_list.
+  const llvm::Type *ProtocolListTy;
+  /// ProtocolListPtrTy - LLVM type for struct objc_protocol_list *.
+ const llvm::Type *ProtocolListPtrTy;
+ /// CategoryTy - LLVM type for struct objc_category.
+ const llvm::StructType *CategoryTy;
+ /// ClassTy - LLVM type for struct objc_class.
+ const llvm::StructType *ClassTy;
+ /// ClassPtrTy - LLVM type for struct objc_class *.
+ const llvm::Type *ClassPtrTy;
+ /// ClassExtensionTy - LLVM type for struct objc_class_ext.
+ const llvm::StructType *ClassExtensionTy;
+ /// ClassExtensionPtrTy - LLVM type for struct objc_class_ext *.
+ const llvm::Type *ClassExtensionPtrTy;
+ // IvarTy - LLVM type for struct objc_ivar.
+ const llvm::StructType *IvarTy;
+ /// IvarListTy - LLVM type for struct objc_ivar_list.
+ const llvm::Type *IvarListTy;
+ /// IvarListPtrTy - LLVM type for struct objc_ivar_list *.
+ const llvm::Type *IvarListPtrTy;
+ /// MethodListTy - LLVM type for struct objc_method_list.
+ const llvm::Type *MethodListTy;
+ /// MethodListPtrTy - LLVM type for struct objc_method_list *.
+ const llvm::Type *MethodListPtrTy;
+
+ /// ExceptionDataTy - LLVM type for struct _objc_exception_data.
+ const llvm::Type *ExceptionDataTy;
+
+ /// ExceptionTryEnterFn - LLVM objc_exception_try_enter function.
+ llvm::Constant *getExceptionTryEnterFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy));
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ Params, false),
+ "objc_exception_try_enter");
+ }
+
+ /// ExceptionTryExitFn - LLVM objc_exception_try_exit function.
+ llvm::Constant *getExceptionTryExitFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy));
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ Params, false),
+ "objc_exception_try_exit");
+ }
+
+ /// ExceptionExtractFn - LLVM objc_exception_extract function.
+ llvm::Constant *getExceptionExtractFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy));
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, false),
+ "objc_exception_extract");
+
+ }
+
+ /// ExceptionMatchFn - LLVM objc_exception_match function.
+ llvm::Constant *getExceptionMatchFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ClassPtrTy);
+ Params.push_back(ObjectPtrTy);
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext),
+ Params, false),
+ "objc_exception_match");
+
+ }
+
+ /// SetJmpFn - LLVM _setjmp function.
+ llvm::Constant *getSetJmpFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(llvm::Type::getInt32PtrTy(VMContext));
+ return
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext),
+ Params, false),
+ "_setjmp");
+
+ }
+
+public:
+ ObjCTypesHelper(CodeGen::CodeGenModule &cgm);
+ ~ObjCTypesHelper() {}
+};
+
+/// ObjCNonFragileABITypesHelper - has all the types needed by Objective-C's
+/// modern (non-fragile) ABI.
+class ObjCNonFragileABITypesHelper : public ObjCCommonTypesHelper {
+public:
+
+ // MethodListnfABITy - LLVM for struct _method_list_t
+ const llvm::StructType *MethodListnfABITy;
+
+ // MethodListnfABIPtrTy - LLVM for struct _method_list_t*
+ const llvm::Type *MethodListnfABIPtrTy;
+
+ // ProtocolnfABITy = LLVM for struct _protocol_t
+ const llvm::StructType *ProtocolnfABITy;
+
+ // ProtocolnfABIPtrTy = LLVM for struct _protocol_t*
+ const llvm::Type *ProtocolnfABIPtrTy;
+
+ // ProtocolListnfABITy - LLVM for struct _objc_protocol_list
+ const llvm::StructType *ProtocolListnfABITy;
+
+ // ProtocolListnfABIPtrTy - LLVM for struct _objc_protocol_list*
+ const llvm::Type *ProtocolListnfABIPtrTy;
+
+ // ClassnfABITy - LLVM for struct _class_t
+ const llvm::StructType *ClassnfABITy;
+
+ // ClassnfABIPtrTy - LLVM for struct _class_t*
+ const llvm::Type *ClassnfABIPtrTy;
+
+ // IvarnfABITy - LLVM for struct _ivar_t
+ const llvm::StructType *IvarnfABITy;
+
+ // IvarListnfABITy - LLVM for struct _ivar_list_t
+ const llvm::StructType *IvarListnfABITy;
+
+ // IvarListnfABIPtrTy = LLVM for struct _ivar_list_t*
+ const llvm::Type *IvarListnfABIPtrTy;
+
+ // ClassRonfABITy - LLVM for struct _class_ro_t
+ const llvm::StructType *ClassRonfABITy;
+
+ // ImpnfABITy - LLVM for id (*)(id, SEL, ...)
+ const llvm::Type *ImpnfABITy;
+
+ // CategorynfABITy - LLVM for struct _category_t
+ const llvm::StructType *CategorynfABITy;
+
+ // New types for nonfragile abi messaging.
+
+ // MessageRefTy - LLVM for:
+ // struct _message_ref_t {
+ // IMP messenger;
+ // SEL name;
+ // };
+ const llvm::StructType *MessageRefTy;
+ // MessageRefCTy - clang type for struct _message_ref_t
+ QualType MessageRefCTy;
+
+ // MessageRefPtrTy - LLVM for struct _message_ref_t*
+ const llvm::Type *MessageRefPtrTy;
+ // MessageRefCPtrTy - clang type for struct _message_ref_t*
+ QualType MessageRefCPtrTy;
+
+ // MessengerTy - Type of the messenger (shown as IMP above)
+ const llvm::FunctionType *MessengerTy;
+
+ // SuperMessageRefTy - LLVM for:
+ // struct _super_message_ref_t {
+ // SUPER_IMP messenger;
+ // SEL name;
+ // };
+ const llvm::StructType *SuperMessageRefTy;
+
+ // SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
+ const llvm::Type *SuperMessageRefPtrTy;
+
+ llvm::Constant *getMessageSendFixupFn() {
+ // id objc_msgSend_fixup(id, struct message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(MessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSend_fixup");
+ }
+
+ llvm::Constant *getMessageSendFpretFixupFn() {
+ // id objc_msgSend_fpret_fixup(id, struct message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(MessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSend_fpret_fixup");
+ }
+
+ llvm::Constant *getMessageSendStretFixupFn() {
+ // id objc_msgSend_stret_fixup(id, struct message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(MessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSend_stret_fixup");
+ }
+
+ llvm::Constant *getMessageSendSuper2FixupFn() {
+ // id objc_msgSendSuper2_fixup (struct objc_super *,
+ // struct _super_message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SuperMessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSendSuper2_fixup");
+ }
+
+ llvm::Constant *getMessageSendSuper2StretFixupFn() {
+ // id objc_msgSendSuper2_stret_fixup(struct objc_super *,
+ // struct _super_message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SuperMessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSendSuper2_stret_fixup");
+ }
+
+ llvm::Constant *getObjCEndCatchFn() {
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ false),
+ "objc_end_catch");
+
+ }
+
+ llvm::Constant *getObjCBeginCatchFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(Int8PtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(Int8PtrTy,
+ Params, false),
+ "objc_begin_catch");
+ }
+
+ const llvm::StructType *EHTypeTy;
+ const llvm::Type *EHTypePtrTy;
+
+ ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm);
+ ~ObjCNonFragileABITypesHelper(){}
+};
+
+class CGObjCCommonMac : public CodeGen::CGObjCRuntime {
+public:
+ // FIXME - accessibility
+ class GC_IVAR {
+ public:
+ unsigned ivar_bytepos;
+ unsigned ivar_size;
+ GC_IVAR(unsigned bytepos = 0, unsigned size = 0)
+ : ivar_bytepos(bytepos), ivar_size(size) {}
+
+ // Allow sorting based on byte pos.
+ bool operator<(const GC_IVAR &b) const {
+ return ivar_bytepos < b.ivar_bytepos;
+ }
+ };
+
+ class SKIP_SCAN {
+ public:
+ unsigned skip;
+ unsigned scan;
+ SKIP_SCAN(unsigned _skip = 0, unsigned _scan = 0)
+ : skip(_skip), scan(_scan) {}
+ };
+
+protected:
+ CodeGen::CodeGenModule &CGM;
+ llvm::LLVMContext &VMContext;
+  // FIXME: We may not need this after all.
+ unsigned ObjCABI;
+
+ // gc ivar layout bitmap calculation helper caches.
+ llvm::SmallVector<GC_IVAR, 16> SkipIvars;
+ llvm::SmallVector<GC_IVAR, 16> IvarsInfo;
+
+ /// LazySymbols - Symbols to generate a lazy reference for. See
+ /// DefinedSymbols and FinishModule().
+ llvm::SetVector<IdentifierInfo*> LazySymbols;
+
+ /// DefinedSymbols - External symbols which are defined by this
+ /// module. The symbols in this list and LazySymbols are used to add
+ /// special linker symbols which ensure that Objective-C modules are
+ /// linked properly.
+ llvm::SetVector<IdentifierInfo*> DefinedSymbols;
+
+ /// ClassNames - uniqued class names.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> ClassNames;
+
+ /// MethodVarNames - uniqued method variable names.
+ llvm::DenseMap<Selector, llvm::GlobalVariable*> MethodVarNames;
+
+ /// DefinedCategoryNames - list of category names in form Class_Category.
+ llvm::SetVector<std::string> DefinedCategoryNames;
+
+ /// MethodVarTypes - uniqued method type signatures. We have to use
+  /// a StringMap here because we have no other unique reference.
+ llvm::StringMap<llvm::GlobalVariable*> MethodVarTypes;
+
+ /// MethodDefinitions - map of methods which have been defined in
+ /// this translation unit.
+ llvm::DenseMap<const ObjCMethodDecl*, llvm::Function*> MethodDefinitions;
+
+  /// PropertyNames - uniqued property names.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> PropertyNames;
+
+ /// ClassReferences - uniqued class references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> ClassReferences;
+
+ /// SelectorReferences - uniqued selector references.
+ llvm::DenseMap<Selector, llvm::GlobalVariable*> SelectorReferences;
+
+ /// Protocols - Protocols for which an objc_protocol structure has
+ /// been emitted. Forward declarations are handled by creating an
+ /// empty structure whose initializer is filled in when/if defined.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> Protocols;
+
+ /// DefinedProtocols - Protocols which have actually been
+ /// defined. We should not need this, see FIXME in GenerateProtocol.
+ llvm::DenseSet<IdentifierInfo*> DefinedProtocols;
+
+ /// DefinedClasses - List of defined classes.
+ std::vector<llvm::GlobalValue*> DefinedClasses;
+
+ /// DefinedNonLazyClasses - List of defined "non-lazy" classes.
+ std::vector<llvm::GlobalValue*> DefinedNonLazyClasses;
+
+ /// DefinedCategories - List of defined categories.
+ std::vector<llvm::GlobalValue*> DefinedCategories;
+
+ /// DefinedNonLazyCategories - List of defined "non-lazy" categories.
+ std::vector<llvm::GlobalValue*> DefinedNonLazyCategories;
+
+ /// GetNameForMethod - Return a name for the given method.
+ /// \param[out] NameOut - The return value.
+ void GetNameForMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD,
+ llvm::SmallVectorImpl<char> &NameOut);
+
+ /// GetMethodVarName - Return a unique constant for the given
+ /// selector's name. The return value has type char *.
+ llvm::Constant *GetMethodVarName(Selector Sel);
+ llvm::Constant *GetMethodVarName(IdentifierInfo *Ident);
+
+ /// GetMethodVarType - Return a unique constant for the given
+ /// method's type encoding. The return value has type char *.
+
+ // FIXME: This is a horrible name.
+ llvm::Constant *GetMethodVarType(const ObjCMethodDecl *D);
+ llvm::Constant *GetMethodVarType(const FieldDecl *D);
+
+ /// GetPropertyName - Return a unique constant for the given
+ /// name. The return value has type char *.
+ llvm::Constant *GetPropertyName(IdentifierInfo *Ident);
+
+ // FIXME: This can be dropped once string functions are unified.
+ llvm::Constant *GetPropertyTypeString(const ObjCPropertyDecl *PD,
+ const Decl *Container);
+
+ /// GetClassName - Return a unique constant for the given class's
+ /// name. The return value has type char *.
+ llvm::Constant *GetClassName(IdentifierInfo *Ident);
+
+ llvm::Function *GetMethodDefinition(const ObjCMethodDecl *MD);
+
+ /// BuildIvarLayout - Builds ivar layout bitmap for the class
+ /// implementation for the __strong or __weak case.
+ ///
+ llvm::Constant *BuildIvarLayout(const ObjCImplementationDecl *OI,
+ bool ForStrongLayout);
+
+ llvm::Constant *BuildIvarLayoutBitmap(std::string &BitMap);
+
+ void BuildAggrIvarRecordLayout(const RecordType *RT,
+ unsigned int BytePos, bool ForStrongLayout,
+ bool &HasUnion);
+ void BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
+ const llvm::StructLayout *Layout,
+ const RecordDecl *RD,
+ const llvm::SmallVectorImpl<FieldDecl*> &RecFields,
+ unsigned int BytePos, bool ForStrongLayout,
+ bool &HasUnion);
+
+ /// GetIvarLayoutName - Returns a unique constant for the given
+ /// ivar layout bitmap.
+ llvm::Constant *GetIvarLayoutName(IdentifierInfo *Ident,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// EmitPropertyList - Emit the given property list. The return
+ /// value has type PropertyListPtrTy.
+ llvm::Constant *EmitPropertyList(llvm::Twine Name,
+ const Decl *Container,
+ const ObjCContainerDecl *OCD,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// PushProtocolProperties - Push protocol's property on the input stack.
+ void PushProtocolProperties(llvm::SmallPtrSet<const IdentifierInfo*, 16> &PropertySet,
+ std::vector<llvm::Constant*> &Properties,
+ const Decl *Container,
+ const ObjCProtocolDecl *PROTO,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// GetProtocolRef - Return a reference to the internal protocol
+ /// description, creating an empty one if it has not been
+ /// defined. The return value has type ProtocolPtrTy.
+ llvm::Constant *GetProtocolRef(const ObjCProtocolDecl *PD);
+
+ /// CreateMetadataVar - Create a global variable with internal
+ /// linkage for use by the Objective-C runtime.
+ ///
+ /// This is a convenience wrapper which not only creates the
+ /// variable, but also sets the section and alignment and adds the
+ /// global to the "llvm.used" list.
+ ///
+ /// \param Name - The variable name.
+ /// \param Init - The variable initializer; this is also used to
+ /// define the type of the variable.
+ /// \param Section - The section the variable should go into, or 0.
+ /// \param Align - The alignment for the variable, or 0.
+ /// \param AddToUsed - Whether the variable should be added to
+ /// "llvm.used".
+ llvm::GlobalVariable *CreateMetadataVar(llvm::Twine Name,
+ llvm::Constant *Init,
+ const char *Section,
+ unsigned Align,
+ bool AddToUsed);
+
+ CodeGen::RValue EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ llvm::Value *Sel,
+ llvm::Value *Arg0,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *OMD,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// EmitImageInfo - Emit the image info marker used to encode some module
+ /// level information.
+ void EmitImageInfo();
+
+public:
+ CGObjCCommonMac(CodeGen::CodeGenModule &cgm) :
+ CGM(cgm), VMContext(cgm.getLLVMContext()) { }
+
+ virtual llvm::Constant *GenerateConstantString(const StringLiteral *SL);
+
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD=0);
+
+ virtual void GenerateProtocol(const ObjCProtocolDecl *PD);
+
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD)=0;
+
+ /// GetOrEmitProtocolRef - Get a forward reference to the protocol
+ /// object for the given declaration, emitting it if needed. These
+ /// forward references will be filled in with empty bodies if no
+ /// definition is seen. The return value has type ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD)=0;
+ virtual llvm::Constant *GCBlockLayout(CodeGen::CodeGenFunction &CGF,
+ const llvm::SmallVectorImpl<const BlockDeclRefExpr *> &);
+
+};
+
+class CGObjCMac : public CGObjCCommonMac {
+private:
+ ObjCTypesHelper ObjCTypes;
+
+ /// EmitModuleInfo - Another marker encoding module level
+ /// information.
+ void EmitModuleInfo();
+
+ /// EmitModuleSymbols - Emit module symbols, the list of defined
+ /// classes and categories. The result has type SymtabPtrTy.
+ llvm::Constant *EmitModuleSymbols();
+
+ /// FinishModule - Write out global data structures at the end of
+ /// processing a translation unit.
+ void FinishModule();
+
+ /// EmitClassExtension - Generate the class extension structure used
+ /// to store the weak ivar layout and properties. The return value
+ /// has type ClassExtensionPtrTy.
+ llvm::Constant *EmitClassExtension(const ObjCImplementationDecl *ID);
+
+ /// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
+ /// for the given class.
+ llvm::Value *EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ /// EmitSuperClassRef - Emits a reference to the class's main metadata class.
+ llvm::Value *EmitSuperClassRef(const ObjCInterfaceDecl *ID);
+
+ /// EmitIvarList - Emit the ivar list for the given
+ /// implementation. If ForClass is true the list of class ivars
+ /// (i.e. metaclass ivars) is emitted, otherwise the list of
+ /// interface ivars will be emitted. The return value has type
+ /// IvarListPtrTy.
+ llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID,
+ bool ForClass);
+
+ /// EmitMetaClassRef - Emit a forward reference to the class structure
+ /// for the metaclass of the given interface. The return value has
+ /// type ClassPtrTy.
+ llvm::Constant *EmitMetaClassRef(const ObjCInterfaceDecl *ID);
+
+ /// EmitMetaClass - Emit a class structure for the metaclass of the
+ /// given implementation. The return value has type ClassPtrTy.
+ llvm::Constant *EmitMetaClass(const ObjCImplementationDecl *ID,
+ llvm::Constant *Protocols,
+ const ConstantVector &Methods);
+
+ llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
+
+ llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);
+
+ /// EmitMethodList - Emit the method list for the given
+ /// implementation. The return value has type MethodListPtrTy.
+ llvm::Constant *EmitMethodList(llvm::Twine Name,
+ const char *Section,
+ const ConstantVector &Methods);
+
+ /// EmitMethodDescList - Emit a method description list for a list of
+ /// method declarations.
+ /// - TypeName: The name for the type containing the methods.
+ /// - IsProtocol: True iff these methods are for a protocol.
+ /// - ClassMethods: True iff these are class methods.
+ /// - Required: When true, only "required" methods are
+ /// listed. Similarly, when false only "optional" methods are
+ /// listed. For classes this should always be true.
+ /// - begin, end: The method list to output.
+ ///
+ /// The return value has type MethodDescriptionListPtrTy.
+ llvm::Constant *EmitMethodDescList(llvm::Twine Name,
+ const char *Section,
+ const ConstantVector &Methods);
+
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD);
+
+ /// GetOrEmitProtocolRef - Get a forward reference to the protocol
+ /// object for the given declaration, emitting it if needed. These
+ /// forward references will be filled in with empty bodies if no
+ /// definition is seen. The return value has type ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD);
+
+ /// EmitProtocolExtension - Generate the protocol extension
+ /// structure used to store optional instance and class methods, and
+ /// protocol properties. The return value has type
+ /// ProtocolExtensionPtrTy.
+ llvm::Constant *
+ EmitProtocolExtension(const ObjCProtocolDecl *PD,
+ const ConstantVector &OptInstanceMethods,
+ const ConstantVector &OptClassMethods);
+
+ /// EmitProtocolList - Generate the list of referenced
+ /// protocols. The return value has type ProtocolListPtrTy.
+ llvm::Constant *EmitProtocolList(llvm::Twine Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end);
+
+ /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
+ /// for the given selector.
+ llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval=false);
+
+public:
+ CGObjCMac(CodeGen::CodeGenModule &cgm);
+
+ virtual llvm::Function *ModuleInitFunction();
+
+ virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method);
+
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval = false);
+
+ /// The NeXT/Apple runtimes do not support typed selectors; just emit an
+ /// untyped one.
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ const ObjCMethodDecl *Method);
+
+ virtual llvm::Constant *GetEHType(QualType T);
+
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
+
+ virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD);
+
+ virtual llvm::Constant *GetPropertyGetFunction();
+ virtual llvm::Constant *GetPropertySetFunction();
+ virtual llvm::Constant *GetCopyStructFunction();
+ virtual llvm::Constant *EnumerationMutationFunction();
+
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S);
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S);
+ void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, const Stmt &S);
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S);
+ virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj);
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst);
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ bool threadlocal = false);
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ llvm::Value *ivarOffset);
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *dest, llvm::Value *src,
+ llvm::Value *size);
+
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+};
+
+class CGObjCNonFragileABIMac : public CGObjCCommonMac {
+private:
+ ObjCNonFragileABITypesHelper ObjCTypes;
+ llvm::GlobalVariable* ObjCEmptyCacheVar;
+ llvm::GlobalVariable* ObjCEmptyVtableVar;
+
+ /// SuperClassReferences - uniqued super class references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> SuperClassReferences;
+
+ /// MetaClassReferences - uniqued meta class references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> MetaClassReferences;
+
+ /// EHTypeReferences - uniqued class ehtype references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> EHTypeReferences;
+
+ /// NonLegacyDispatchMethods - List of methods for which we do *not* generate
+ /// legacy messaging dispatch.
+ llvm::DenseSet<Selector> NonLegacyDispatchMethods;
+
+ /// DefinedMetaClasses - List of defined meta-classes.
+ std::vector<llvm::GlobalValue*> DefinedMetaClasses;
+
+ /// LegacyDispatchedSelector - Returns true if SEL is not in the list of
+ /// NonLegacyDispatchMethods; false otherwise.
+ bool LegacyDispatchedSelector(Selector Sel);
+
+ /// FinishNonFragileABIModule - Write out global data structures at the end of
+ /// processing a translation unit.
+ void FinishNonFragileABIModule();
+
+ /// AddModuleClassList - Add the given list of class pointers to the
+ /// module with the provided symbol and section names.
+ void AddModuleClassList(const std::vector<llvm::GlobalValue*> &Container,
+ const char *SymbolName,
+ const char *SectionName);
+
+ llvm::GlobalVariable * BuildClassRoTInitializer(unsigned flags,
+ unsigned InstanceStart,
+ unsigned InstanceSize,
+ const ObjCImplementationDecl *ID);
+ llvm::GlobalVariable * BuildClassMetaData(std::string &ClassName,
+ llvm::Constant *IsAGV,
+ llvm::Constant *SuperClassGV,
+ llvm::Constant *ClassRoGV,
+ bool HiddenVisibility);
+
+ llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
+
+ llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);
+
+ /// EmitMethodList - Emit the method list for the given
+ /// implementation. The return value has type MethodListnfABITy.
+ llvm::Constant *EmitMethodList(llvm::Twine Name,
+ const char *Section,
+ const ConstantVector &Methods);
+ /// EmitIvarList - Emit the ivar list for the given
+ /// implementation. If ForClass is true the list of class ivars
+ /// (i.e. metaclass ivars) is emitted, otherwise the list of
+ /// interface ivars will be emitted. The return value has type
+ /// IvarListnfABIPtrTy.
+ llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID);
+
+ llvm::Constant *EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar,
+ unsigned long int offset);
+
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD);
+
+ /// GetOrEmitProtocolRef - Get a forward reference to the protocol
+ /// object for the given declaration, emitting it if needed. These
+ /// forward references will be filled in with empty bodies if no
+ /// definition is seen. The return value has type ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD);
+
+ /// EmitProtocolList - Generate the list of referenced
+ /// protocols. The return value has type ProtocolListPtrTy.
+ llvm::Constant *EmitProtocolList(llvm::Twine Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end);
+
+ CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs);
+
+ /// GetClassGlobal - Return the global variable for the Objective-C
+ /// class of the given name.
+ llvm::GlobalVariable *GetClassGlobal(const std::string &Name);
+
+ /// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
+ /// for the given class reference.
+ llvm::Value *EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ /// EmitSuperClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
+ /// for the given super class reference.
+ llvm::Value *EmitSuperClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ /// EmitMetaClassRef - Return a Value* holding the address of the _class_t
+ /// metadata for the given class.
+ llvm::Value *EmitMetaClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ /// ObjCIvarOffsetVariable - Returns the ivar offset variable for
+ /// the given ivar.
+ ///
+ llvm::GlobalVariable * ObjCIvarOffsetVariable(
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar);
+
+ /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
+ /// for the given selector.
+ llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval=false);
+
+ /// GetInterfaceEHType - Get the cached ehtype for the given Objective-C
+ /// interface. The return value has type EHTypePtrTy.
+ llvm::Constant *GetInterfaceEHType(const ObjCInterfaceDecl *ID,
+ bool ForDefinition);
+
+ const char *getMetaclassSymbolPrefix() const {
+ return "OBJC_METACLASS_$_";
+ }
+
+ const char *getClassSymbolPrefix() const {
+ return "OBJC_CLASS_$_";
+ }
+
+ void GetClassSizeInfo(const ObjCImplementationDecl *OID,
+ uint32_t &InstanceStart,
+ uint32_t &InstanceSize);
+
+ // Shamelessly stolen from Analysis/CFRefCount.cpp
+ Selector GetNullarySelector(const char* name) const {
+ IdentifierInfo* II = &CGM.getContext().Idents.get(name);
+ return CGM.getContext().Selectors.getSelector(0, &II);
+ }
+
+ Selector GetUnarySelector(const char* name) const {
+ IdentifierInfo* II = &CGM.getContext().Idents.get(name);
+ return CGM.getContext().Selectors.getSelector(1, &II);
+ }
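+
+ // For illustration (a sketch, not in the original source): these helpers
+ // build selectors purely from colon count, e.g. GetNullarySelector("class")
+ // yields the selector 'class', while GetUnarySelector("isKindOfClass")
+ // yields the one-argument selector 'isKindOfClass:'.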
+
+ /// ImplementationIsNonLazy - Check whether the given category or
+ /// class implementation is "non-lazy".
+ bool ImplementationIsNonLazy(const ObjCImplDecl *OD) const;
+
+public:
+ CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm);
+ // FIXME. All stubs for now!
+ virtual llvm::Function *ModuleInitFunction();
+
+ virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method);
+
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lvalue = false)
+ { return EmitSelector(Builder, Sel, lvalue); }
+
+ /// The NeXT/Apple runtimes do not support typed selectors; just emit an
+ /// untyped one.
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ const ObjCMethodDecl *Method)
+ { return EmitSelector(Builder, Method->getSelector()); }
+
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
+
+ virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD);
+
+ virtual llvm::Constant *GetEHType(QualType T);
+
+ virtual llvm::Constant *GetPropertyGetFunction() {
+ return ObjCTypes.getGetPropertyFn();
+ }
+ virtual llvm::Constant *GetPropertySetFunction() {
+ return ObjCTypes.getSetPropertyFn();
+ }
+
+ virtual llvm::Constant *GetCopyStructFunction() {
+ return ObjCTypes.getCopyStructFn();
+ }
+
+ virtual llvm::Constant *EnumerationMutationFunction() {
+ return ObjCTypes.getEnumerationMutationFn();
+ }
+
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S);
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S);
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S);
+ virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj);
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst);
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ bool threadlocal = false);
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ llvm::Value *ivarOffset);
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *dest, llvm::Value *src,
+ llvm::Value *size);
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+};
+
+} // end anonymous namespace
+
+/* *** Helper Functions *** */
+
+/// getConstantGEP() - Helper routine to construct simple GEPs.
+static llvm::Constant *getConstantGEP(llvm::LLVMContext &VMContext,
+ llvm::Constant *C,
+ unsigned idx0,
+ unsigned idx1) {
+ llvm::Value *Idxs[] = {
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), idx0),
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), idx1)
+ };
+ return llvm::ConstantExpr::getGetElementPtr(C, Idxs, 2);
+}
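+
+// Hedged usage sketch (not part of the original code): given a global string
+// constant C of type [N x i8], getConstantGEP(VMContext, C, 0, 0) produces a
+// constant i8* pointing at its first character, which is how the metadata
+// emitters below obtain 'char *' values from string globals.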
+
+/// hasObjCExceptionAttribute - Return true if this class or any super
+/// class has the __objc_exception__ attribute.
+static bool hasObjCExceptionAttribute(ASTContext &Context,
+ const ObjCInterfaceDecl *OID) {
+ if (OID->hasAttr<ObjCExceptionAttr>())
+ return true;
+ if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
+ return hasObjCExceptionAttribute(Context, Super);
+ return false;
+}
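+
+// For reference (an illustrative note, not from the original source): the
+// attribute walked up the superclass chain here is spelled in source as
+//   __attribute__((objc_exception)) @interface SomeClass : NSObject ... @end
+// and roughly marks classes with exception support in the fragile runtime.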
+
+/* *** CGObjCMac Public Interface *** */
+
+CGObjCMac::CGObjCMac(CodeGen::CodeGenModule &cgm) : CGObjCCommonMac(cgm),
+ ObjCTypes(cgm) {
+ ObjCABI = 1;
+ EmitImageInfo();
+}
+
+/// GetClass - Return a reference to the class for the given interface
+/// decl.
+llvm::Value *CGObjCMac::GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ return EmitClassRef(Builder, ID);
+}
+
+/// GetSelector - Return the pointer to the unique'd string for this selector.
+llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval) {
+ return EmitSelector(Builder, Sel, lval);
+}
+llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
+ *Method) {
+ return EmitSelector(Builder, Method->getSelector());
+}
+
+llvm::Constant *CGObjCMac::GetEHType(QualType T) {
+ llvm_unreachable("asking for catch type for ObjC type in fragile runtime");
+ return 0;
+}
+
+/// Generate a constant CFString object.
+/*
+ struct __builtin_CFString {
+ const int *isa; // point to __CFConstantStringClassReference
+ int flags;
+ const char *str;
+ long length;
+ };
+*/
+
+/// or generate a constant NSString object.
+/*
+ struct __builtin_NSString {
+ const int *isa; // point to __NSConstantStringClassReference
+ const char *str;
+ unsigned int length;
+ };
+*/
+
+llvm::Constant *CGObjCCommonMac::GenerateConstantString(
+ const StringLiteral *SL) {
+ return (CGM.getLangOptions().NoConstantCFStrings == 0 ?
+ CGM.GetAddrOfConstantCFString(SL) :
+ CGM.GetAddrOfConstantNSString(SL));
+}
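+
+// Illustrative sketch (assumed example, not from the original source): for a
+// literal such as
+//   NSString *greeting = @"hello";
+// the default path emits a constant CFString shaped like __builtin_CFString
+// above ({ isa, flags, "hello", 5 }), while -fno-constant-cfstrings selects
+// the NSString layout instead.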
+
+/// Generates a message send where 'super' is the receiver. This is
+/// a message send to self with special delivery semantics indicating
+/// which class's method should be called.
+CodeGen::RValue
+CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CodeGen::CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ // Create and init a super structure; this is a (receiver, class)
+ // pair we will pass to objc_msgSendSuper.
+ llvm::Value *ObjCSuper =
+ CGF.Builder.CreateAlloca(ObjCTypes.SuperTy, 0, "objc_super");
+ llvm::Value *ReceiverAsObject =
+ CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateStore(ReceiverAsObject,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 0));
+
+ // If this is a class message the metaclass is passed as the target.
+ llvm::Value *Target;
+ if (IsClassMessage) {
+ if (isCategoryImpl) {
+ // A message sent to 'super' in a class method defined in a category
+ // implementation requires special treatment.
+ // If we are in a class method, we must retrieve the
+ // _metaclass_ for the current class, pointed at by
+ // the class's "isa" pointer. The following assumes that
+ // "isa" is the first ivar in a class (which it must be).
+ Target = EmitClassRef(CGF.Builder, Class->getSuperClass());
+ Target = CGF.Builder.CreateStructGEP(Target, 0);
+ Target = CGF.Builder.CreateLoad(Target);
+ } else {
+ llvm::Value *MetaClassPtr = EmitMetaClassRef(Class);
+ llvm::Value *SuperPtr = CGF.Builder.CreateStructGEP(MetaClassPtr, 1);
+ llvm::Value *Super = CGF.Builder.CreateLoad(SuperPtr);
+ Target = Super;
+ }
+ }
+ else if (isCategoryImpl)
+ Target = EmitClassRef(CGF.Builder, Class->getSuperClass());
+ else {
+ llvm::Value *ClassPtr = EmitSuperClassRef(Class);
+ ClassPtr = CGF.Builder.CreateStructGEP(ClassPtr, 1);
+ Target = CGF.Builder.CreateLoad(ClassPtr);
+ }
+ // FIXME: We shouldn't need to do this cast, rectify the ASTContext and
+ // ObjCTypes types.
+ const llvm::Type *ClassTy =
+ CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
+ Target = CGF.Builder.CreateBitCast(Target, ClassTy);
+ CGF.Builder.CreateStore(Target,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 1));
+ return EmitLegacyMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ ObjCSuper, ObjCTypes.SuperPtrCTy,
+ true, CallArgs, Method, ObjCTypes);
+}
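+
+// A minimal sketch of what the code above produces (assuming the fragile
+// runtime's objc_msgSendSuper entry point; the selector name is hypothetical):
+// for an instance-method send such as
+//   [super finalizeLoad];
+// the generated code is roughly
+//   struct objc_super tmp = { (id)self, <superclass metadata> };
+//   objc_msgSendSuper(&tmp, @selector(finalizeLoad));
+// with the metaclass substituted as the target when sending from a class
+// method.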
+
+/// Generate code for a message send expression.
+CodeGen::RValue CGObjCMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method) {
+ return EmitLegacyMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs, Method, ObjCTypes);
+}
+
+CodeGen::RValue
+CGObjCCommonMac::EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ llvm::Value *Sel,
+ llvm::Value *Arg0,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ CallArgList ActualArgs;
+ if (!IsSuper)
+ Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy, "tmp");
+ ActualArgs.push_back(std::make_pair(RValue::get(Arg0), Arg0Ty));
+ ActualArgs.push_back(std::make_pair(RValue::get(Sel),
+ CGF.getContext().getObjCSelType()));
+ ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
+ FunctionType::ExtInfo());
+ const llvm::FunctionType *FTy =
+ Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
+
+ if (Method)
+ assert(CGM.getContext().getCanonicalType(Method->getResultType()) ==
+ CGM.getContext().getCanonicalType(ResultType) &&
+ "Result type mismatch!");
+
+ llvm::Constant *Fn = NULL;
+ if (CGM.ReturnTypeUsesSRet(FnInfo)) {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper)
+ : ObjCTypes.getSendStretFn(IsSuper);
+ } else if (CGM.ReturnTypeUsesFPRet(ResultType)) {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendFpretFn2(IsSuper)
+ : ObjCTypes.getSendFpretFn(IsSuper);
+ } else {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendFn2(IsSuper)
+ : ObjCTypes.getSendFn(IsSuper);
+ }
+ Fn = llvm::ConstantExpr::getBitCast(Fn, llvm::PointerType::getUnqual(FTy));
+ return CGF.EmitCall(FnInfo, Fn, Return, ActualArgs);
+}
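+
+// Dispatch-function selection above, summarized as a hedged sketch (the exact
+// symbols come from ObjCTypes and are assumed here): struct-returning sends
+// go through objc_msgSend_stret, floating-point-returning sends on applicable
+// targets through objc_msgSend_fpret, and everything else through plain
+// objc_msgSend; the super-send counterparts are chosen when IsSuper is set.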
+
+static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) {
+ if (FQT.isObjCGCStrong())
+ return Qualifiers::Strong;
+
+ if (FQT.isObjCGCWeak())
+ return Qualifiers::Weak;
+
+ if (FQT->isObjCObjectPointerType() || FQT->isBlockPointerType())
+ return Qualifiers::Strong;
+
+ if (const PointerType *PT = FQT->getAs<PointerType>())
+ return GetGCAttrTypeForType(Ctx, PT->getPointeeType());
+
+ return Qualifiers::GCNone;
+}
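+
+// Worked examples for the mapping above (illustrative only):
+//   __strong id obj;      -> Qualifiers::Strong
+//   __weak id ref;        -> Qualifiers::Weak
+//   NSString *s;          -> Qualifiers::Strong (ObjC object pointer)
+//   void (^blk)(void);    -> Qualifiers::Strong (block pointer)
+//   int **pp;             -> Qualifiers::GCNone (recurses through pointers)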
+
+llvm::Constant *CGObjCCommonMac::GCBlockLayout(CodeGen::CodeGenFunction &CGF,
+ const llvm::SmallVectorImpl<const BlockDeclRefExpr *> &DeclRefs) {
+ llvm::Constant *NullPtr =
+ llvm::Constant::getNullValue(llvm::Type::getInt8PtrTy(VMContext));
+ if ((CGM.getLangOptions().getGCMode() == LangOptions::NonGC) ||
+ DeclRefs.empty())
+ return NullPtr;
+ bool hasUnion = false;
+ SkipIvars.clear();
+ IvarsInfo.clear();
+ unsigned WordSizeInBits = CGM.getContext().Target.getPointerWidth(0);
+ unsigned ByteSizeInBits = CGM.getContext().Target.getCharWidth();
+
+ for (size_t i = 0; i < DeclRefs.size(); ++i) {
+ const BlockDeclRefExpr *BDRE = DeclRefs[i];
+ const ValueDecl *VD = BDRE->getDecl();
+ CharUnits Offset = CGF.BlockDecls[VD];
+ uint64_t FieldOffset = Offset.getQuantity();
+ QualType Ty = VD->getType();
+ assert(!Ty->isArrayType() &&
+ "Array block variable should have been caught");
+ if ((Ty->isRecordType() || Ty->isUnionType()) && !BDRE->isByRef()) {
+ BuildAggrIvarRecordLayout(Ty->getAs<RecordType>(),
+ FieldOffset,
+ true,
+ hasUnion);
+ continue;
+ }
+
+ Qualifiers::GC GCAttr = GetGCAttrTypeForType(CGM.getContext(), Ty);
+ unsigned FieldSize = CGM.getContext().getTypeSize(Ty);
+ // __block variables are passed by their descriptor's address, so the
+ // size must reflect this.
+ if (BDRE->isByRef())
+ FieldSize = WordSizeInBits;
+ if (GCAttr == Qualifiers::Strong || BDRE->isByRef())
+ IvarsInfo.push_back(GC_IVAR(FieldOffset,
+ FieldSize / WordSizeInBits));
+ else if (GCAttr == Qualifiers::GCNone || GCAttr == Qualifiers::Weak)
+ SkipIvars.push_back(GC_IVAR(FieldOffset,
+ FieldSize / ByteSizeInBits));
+ }
+
+ if (IvarsInfo.empty())
+ return NullPtr;
+ // Sort on byte position in case we encountered a union nested in
+ // the block variable's aggregate type.
+ if (hasUnion && !IvarsInfo.empty())
+ std::sort(IvarsInfo.begin(), IvarsInfo.end());
+ if (hasUnion && !SkipIvars.empty())
+ std::sort(SkipIvars.begin(), SkipIvars.end());
+
+ std::string BitMap;
+ llvm::Constant *C = BuildIvarLayoutBitmap(BitMap);
+ if (CGM.getLangOptions().ObjCGCBitmapPrint) {
+ printf("\n block variable layout for block: ");
+ const unsigned char *s = (unsigned char*)BitMap.c_str();
+ for (unsigned i = 0; i < BitMap.size(); i++)
+ if (!(s[i] & 0xf0))
+ printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
+ else
+ printf("0x%x%s", s[i], s[i] != 0 ? ", " : "");
+ printf("\n");
+ }
+
+ return C;
+}
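+
+// Note (a hedged reading of the helpers used above): BuildIvarLayoutBitmap is
+// believed to pack the layout as a byte string where each byte carries a
+// 4-bit "skip" word count in the high nibble and a 4-bit "scan" word count in
+// the low nibble, which is what the debug printout above dumps byte by byte.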
+
+llvm::Value *CGObjCMac::GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD) {
+ // FIXME: I don't understand why gcc generates this, or where it is
+ // resolved. Investigate. It's also wasteful to look this up over and over.
+ LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
+
+ return llvm::ConstantExpr::getBitCast(GetProtocolRef(PD),
+ ObjCTypes.ExternalProtocolPtrTy);
+}
+
+void CGObjCCommonMac::GenerateProtocol(const ObjCProtocolDecl *PD) {
+ // FIXME: We shouldn't need this, the protocol decl should contain enough
+ // information to tell us whether this was a declaration or a definition.
+ DefinedProtocols.insert(PD->getIdentifier());
+
+ // If we have generated a forward reference to this protocol, emit
+ // it now. Otherwise do nothing, the protocol objects are lazily
+ // emitted.
+ if (Protocols.count(PD->getIdentifier()))
+ GetOrEmitProtocol(PD);
+}
+
+llvm::Constant *CGObjCCommonMac::GetProtocolRef(const ObjCProtocolDecl *PD) {
+ if (DefinedProtocols.count(PD->getIdentifier()))
+ return GetOrEmitProtocol(PD);
+ return GetOrEmitProtocolRef(PD);
+}
+
+/*
+// APPLE LOCAL radar 4585769 - Objective-C 1.0 extensions
+struct _objc_protocol {
+struct _objc_protocol_extension *isa;
+char *protocol_name;
+struct _objc_protocol_list *protocol_list;
+struct _objc__method_prototype_list *instance_methods;
+struct _objc__method_prototype_list *class_methods
+};
+
+See EmitProtocolExtension().
+*/
+llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ // Early exit if a defining object has already been generated.
+ if (Entry && Entry->hasInitializer())
+ return Entry;
+
+ // FIXME: I don't understand why gcc generates this, or where it is
+ // resolved. Investigate. It's also wasteful to look this up over and over.
+ LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
+
+ // Construct method lists.
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
+ for (ObjCProtocolDecl::instmeth_iterator
+ i = PD->instmeth_begin(), e = PD->instmeth_end(); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptInstanceMethods.push_back(C);
+ } else {
+ InstanceMethods.push_back(C);
+ }
+ }
+
+ for (ObjCProtocolDecl::classmeth_iterator
+ i = PD->classmeth_begin(), e = PD->classmeth_end(); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptClassMethods.push_back(C);
+ } else {
+ ClassMethods.push_back(C);
+ }
+ }
+
+ std::vector<llvm::Constant*> Values(5);
+ Values[0] = EmitProtocolExtension(PD, OptInstanceMethods, OptClassMethods);
+ Values[1] = GetClassName(PD->getIdentifier());
+ Values[2] =
+ EmitProtocolList("\01L_OBJC_PROTOCOL_REFS_" + PD->getName(),
+ PD->protocol_begin(),
+ PD->protocol_end());
+ Values[3] =
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_" + PD->getName(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ InstanceMethods);
+ Values[4] =
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_" + PD->getName(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ ClassMethods);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
+ Values);
+
+ if (Entry) {
+ // Already created, fix the linkage and update the initializer.
+ Entry->setLinkage(llvm::GlobalValue::InternalLinkage);
+ Entry->setInitializer(Init);
+ } else {
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ "\01L_OBJC_PROTOCOL_" + PD->getName());
+ Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
+ // FIXME: Is this necessary? Why only for protocol?
+ Entry->setAlignment(4);
+ }
+ CGM.AddUsedGlobal(Entry);
+
+ return Entry;
+}
+
+llvm::Constant *CGObjCMac::GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ if (!Entry) {
+ // We use the initializer as a marker of whether this is a forward
+ // reference or not. At module finalization we add the empty
+ // contents for protocols which were referenced but never defined.
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "\01L_OBJC_PROTOCOL_" + PD->getName());
+ Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
+ // FIXME: Is this necessary? Why only for protocol?
+ Entry->setAlignment(4);
+ }
+
+ return Entry;
+}
+
+/*
+ struct _objc_protocol_extension {
+ uint32_t size;
+ struct objc_method_description_list *optional_instance_methods;
+ struct objc_method_description_list *optional_class_methods;
+ struct objc_property_list *instance_properties;
+ };
+*/
+llvm::Constant *
+CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
+ const ConstantVector &OptInstanceMethods,
+ const ConstantVector &OptClassMethods) {
+ uint64_t Size =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy);
+ std::vector<llvm::Constant*> Values(4);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[1] =
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_OPT_"
+ + PD->getName(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ OptInstanceMethods);
+ Values[2] =
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_OPT_" + PD->getName(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ OptClassMethods);
+ Values[3] = EmitPropertyList("\01L_OBJC_$_PROP_PROTO_LIST_" + PD->getName(),
+ 0, PD, ObjCTypes);
+
+ // Return null if no extension bits are used.
+ if (Values[1]->isNullValue() && Values[2]->isNullValue() &&
+ Values[3]->isNullValue())
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.ProtocolExtensionTy, Values);
+
+ // No special section, but goes in llvm.used
+ return CreateMetadataVar("\01L_OBJC_PROTOCOLEXT_" + PD->getName(),
+ Init,
+ 0, 0, true);
+}
+
+/*
+ struct objc_protocol_list {
+ struct objc_protocol_list *next;
+ long count;
+ Protocol *list[];
+ };
+*/
+llvm::Constant *
+CGObjCMac::EmitProtocolList(llvm::Twine Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end) {
+ std::vector<llvm::Constant*> ProtocolRefs;
+
+ for (; begin != end; ++begin)
+ ProtocolRefs.push_back(GetProtocolRef(*begin));
+
+ // Just return null for empty protocol lists
+ if (ProtocolRefs.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+
+ // This list is null terminated.
+ ProtocolRefs.push_back(llvm::Constant::getNullValue(ObjCTypes.ProtocolPtrTy));
+
+ std::vector<llvm::Constant*> Values(3);
+ // This field is only used by the runtime.
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.LongTy,
+ ProtocolRefs.size() - 1);
+ Values[2] =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.ProtocolPtrTy,
+ ProtocolRefs.size()),
+ ProtocolRefs);
+
+ llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init, "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ 4, false);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListPtrTy);
+}
+
+void CGObjCCommonMac::PushProtocolProperties(llvm::SmallPtrSet<const IdentifierInfo*, 16> &PropertySet,
+ std::vector<llvm::Constant*> &Properties,
+ const Decl *Container,
+ const ObjCProtocolDecl *PROTO,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ std::vector<llvm::Constant*> Prop(2);
+ for (ObjCProtocolDecl::protocol_iterator P = PROTO->protocol_begin(),
+ E = PROTO->protocol_end(); P != E; ++P)
+ PushProtocolProperties(PropertySet, Properties, Container, (*P), ObjCTypes);
+ for (ObjCContainerDecl::prop_iterator I = PROTO->prop_begin(),
+ E = PROTO->prop_end(); I != E; ++I) {
+ const ObjCPropertyDecl *PD = *I;
+ if (!PropertySet.insert(PD->getIdentifier()))
+ continue;
+ Prop[0] = GetPropertyName(PD->getIdentifier());
+ Prop[1] = GetPropertyTypeString(PD, Container);
+ Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy, Prop));
+ }
+}
+
+/*
+ struct _objc_property {
+ const char * const name;
+ const char * const attributes;
+ };
+
+ struct _objc_property_list {
+ uint32_t entsize; // sizeof (struct _objc_property)
+ uint32_t prop_count;
+ struct _objc_property[prop_count];
+ };
+*/
+llvm::Constant *CGObjCCommonMac::EmitPropertyList(llvm::Twine Name,
+ const Decl *Container,
+ const ObjCContainerDecl *OCD,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ std::vector<llvm::Constant*> Properties, Prop(2);
+ llvm::SmallPtrSet<const IdentifierInfo*, 16> PropertySet;
+ for (ObjCContainerDecl::prop_iterator I = OCD->prop_begin(),
+ E = OCD->prop_end(); I != E; ++I) {
+ const ObjCPropertyDecl *PD = *I;
+ PropertySet.insert(PD->getIdentifier());
+ Prop[0] = GetPropertyName(PD->getIdentifier());
+ Prop[1] = GetPropertyTypeString(PD, Container);
+ Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy,
+ Prop));
+ }
+ if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD)) {
+ for (ObjCInterfaceDecl::all_protocol_iterator
+ P = OID->all_referenced_protocol_begin(),
+ E = OID->all_referenced_protocol_end(); P != E; ++P)
+ PushProtocolProperties(PropertySet, Properties, Container, (*P),
+ ObjCTypes);
+ }
+ else if (const ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(OCD)) {
+ for (ObjCCategoryDecl::protocol_iterator P = CD->protocol_begin(),
+ E = CD->protocol_end(); P != E; ++P)
+ PushProtocolProperties(PropertySet, Properties, Container, (*P),
+ ObjCTypes);
+ }
+
+ // Return null for empty list.
+ if (Properties.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+
+ unsigned PropertySize =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.PropertyTy);
+ std::vector<llvm::Constant*> Values(3);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, PropertySize);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Properties.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.PropertyTy,
+ Properties.size());
+ Values[2] = llvm::ConstantArray::get(AT, Properties);
+ llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init,
+ (ObjCABI == 2) ? "__DATA, __objc_const" :
+ "__OBJC,__property,regular,no_dead_strip",
+ (ObjCABI == 2) ? 8 : 4,
+ true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.PropertyListPtrTy);
+}
+
+/*
+ struct objc_method_description_list {
+ int count;
+ struct objc_method_description list[];
+ };
+*/
+llvm::Constant *
+CGObjCMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
+ std::vector<llvm::Constant*> Desc(2);
+ Desc[0] =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ Desc[1] = GetMethodVarType(MD);
+ return llvm::ConstantStruct::get(ObjCTypes.MethodDescriptionTy,
+ Desc);
+}
+
+llvm::Constant *CGObjCMac::EmitMethodDescList(llvm::Twine Name,
+ const char *Section,
+ const ConstantVector &Methods) {
+ // Return null for empty list.
+ if (Methods.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
+
+ std::vector<llvm::Constant*> Values(2);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodDescriptionTy,
+ Methods.size());
+ Values[1] = llvm::ConstantArray::get(AT, Methods);
+ llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+
+ llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.MethodDescriptionListPtrTy);
+}
+
+/*
+ struct _objc_category {
+ char *category_name;
+ char *class_name;
+ struct _objc_method_list *instance_methods;
+ struct _objc_method_list *class_methods;
+ struct _objc_protocol_list *protocols;
+ uint32_t size; // <rdar://4585769>
+ struct _objc_property_list *instance_properties;
+ };
+*/
+void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.CategoryTy);
+
+ // FIXME: This is poor design; the OCD should have a pointer to the category
+ // decl. Additionally, note that Category can be null for the @implementation
+ // w/o an @interface case. Sema should just create one for us as it does for
+ // @implementation so everyone else can live life under a clear blue sky.
+ const ObjCInterfaceDecl *Interface = OCD->getClassInterface();
+ const ObjCCategoryDecl *Category =
+ Interface->FindCategoryDeclaration(OCD->getIdentifier());
+
+ llvm::SmallString<256> ExtName;
+ llvm::raw_svector_ostream(ExtName) << Interface->getName() << '_'
+ << OCD->getName();
+
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ i = OCD->instmeth_begin(), e = OCD->instmeth_end(); i != e; ++i) {
+ // Instance methods should always be defined.
+ InstanceMethods.push_back(GetMethodConstant(*i));
+ }
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ i = OCD->classmeth_begin(), e = OCD->classmeth_end(); i != e; ++i) {
+ // Class methods should always be defined.
+ ClassMethods.push_back(GetMethodConstant(*i));
+ }
+
+ std::vector<llvm::Constant*> Values(7);
+ Values[0] = GetClassName(OCD->getIdentifier());
+ Values[1] = GetClassName(Interface->getIdentifier());
+ LazySymbols.insert(Interface->getIdentifier());
+ Values[2] =
+ EmitMethodList("\01L_OBJC_CATEGORY_INSTANCE_METHODS_" + ExtName.str(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ InstanceMethods);
+ Values[3] =
+ EmitMethodList("\01L_OBJC_CATEGORY_CLASS_METHODS_" + ExtName.str(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ ClassMethods);
+ if (Category) {
+ Values[4] =
+ EmitProtocolList("\01L_OBJC_CATEGORY_PROTOCOLS_" + ExtName.str(),
+ Category->protocol_begin(),
+ Category->protocol_end());
+ } else {
+ Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ }
+ Values[5] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+
+ // If there is no category @interface then there can be no properties.
+ if (Category) {
+ Values[6] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
+ OCD, Category, ObjCTypes);
+ } else {
+ Values[6] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ }
+
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.CategoryTy,
+ Values);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar("\01L_OBJC_CATEGORY_" + ExtName.str(), Init,
+ "__OBJC,__category,regular,no_dead_strip",
+ 4, true);
+ DefinedCategories.push_back(GV);
+ DefinedCategoryNames.insert(ExtName.str());
+}
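+
+// Naming sketch implied by the code above (hypothetical class/category names):
+// for
+//   @implementation NSString (Extras) ... @end
+// ExtName becomes "NSString_Extras" and the emitted metadata global is named
+// "\01L_OBJC_CATEGORY_NSString_Extras" in the __OBJC,__category section.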
+
+// FIXME: Get from somewhere?
+enum ClassFlags {
+ eClassFlags_Factory = 0x00001,
+ eClassFlags_Meta = 0x00002,
+ // <rdr://5142207>
+ eClassFlags_HasCXXStructors = 0x02000,
+ eClassFlags_Hidden = 0x20000,
+ eClassFlags_ABI2_Hidden = 0x00010,
+ eClassFlags_ABI2_HasCXXStructors = 0x00004 // <rdr://4923634>
+};
+
+/*
+ struct _objc_class {
+ Class isa;
+ Class super_class;
+ const char *name;
+ long version;
+ long info;
+ long instance_size;
+ struct _objc_ivar_list *ivars;
+ struct _objc_method_list *methods;
+ struct _objc_cache *cache;
+ struct _objc_protocol_list *protocols;
+ // Objective-C 1.0 extensions (<rdr://4585769>)
+ const char *ivar_layout;
+ struct _objc_class_ext *ext;
+ };
+
+ See EmitClassExtension();
+*/
+void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
+ DefinedSymbols.insert(ID->getIdentifier());
+
+ std::string ClassName = ID->getNameAsString();
+ // FIXME: Gross
+ ObjCInterfaceDecl *Interface =
+ const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
+ llvm::Constant *Protocols =
+ EmitProtocolList("\01L_OBJC_CLASS_PROTOCOLS_" + ID->getName(),
+ Interface->all_referenced_protocol_begin(),
+ Interface->all_referenced_protocol_end());
+ unsigned Flags = eClassFlags_Factory;
+ if (ID->getNumIvarInitializers())
+ Flags |= eClassFlags_HasCXXStructors;
+ unsigned Size =
+ CGM.getContext().getASTObjCImplementationLayout(ID).getSize() / 8;
+
+ // FIXME: Set CXX-structors flag.
+ if (CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden)
+ Flags |= eClassFlags_Hidden;
+
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ for (ObjCImplementationDecl::instmeth_iterator
+ i = ID->instmeth_begin(), e = ID->instmeth_end(); i != e; ++i) {
+ // Instance methods should always be defined.
+ InstanceMethods.push_back(GetMethodConstant(*i));
+ }
+ for (ObjCImplementationDecl::classmeth_iterator
+ i = ID->classmeth_begin(), e = ID->classmeth_end(); i != e; ++i) {
+ // Class methods should always be defined.
+ ClassMethods.push_back(GetMethodConstant(*i));
+ }
+
+ for (ObjCImplementationDecl::propimpl_iterator
+ i = ID->propimpl_begin(), e = ID->propimpl_end(); i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+ if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ InstanceMethods.push_back(C);
+ if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ InstanceMethods.push_back(C);
+ }
+ }
+
+ std::vector<llvm::Constant*> Values(12);
+ Values[ 0] = EmitMetaClass(ID, Protocols, ClassMethods);
+ if (ObjCInterfaceDecl *Super = Interface->getSuperClass()) {
+ // Record a reference to the super class.
+ LazySymbols.insert(Super->getIdentifier());
+
+ Values[ 1] =
+ llvm::ConstantExpr::getBitCast(GetClassName(Super->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ } else {
+ Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
+ }
+ Values[ 2] = GetClassName(ID->getIdentifier());
+ // Version is always 0.
+ Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+ Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
+ Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
+ Values[ 6] = EmitIvarList(ID, false);
+ Values[ 7] =
+ EmitMethodList("\01L_OBJC_INSTANCE_METHODS_" + ID->getName(),
+ "__OBJC,__inst_meth,regular,no_dead_strip",
+ InstanceMethods);
+ // cache is always NULL.
+ Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
+ Values[ 9] = Protocols;
+ Values[10] = BuildIvarLayout(ID, true);
+ Values[11] = EmitClassExtension(ID);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
+ Values);
+ std::string Name("\01L_OBJC_CLASS_");
+ Name += ClassName;
+ const char *Section = "__OBJC,__class,regular,no_dead_strip";
+ // Check for a forward reference.
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+ if (GV) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward metaclass reference has incorrect type.");
+ GV->setLinkage(llvm::GlobalValue::InternalLinkage);
+ GV->setInitializer(Init);
+ GV->setSection(Section);
+ GV->setAlignment(4);
+ CGM.AddUsedGlobal(GV);
+ }
+ else
+ GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ DefinedClasses.push_back(GV);
+}
+
+llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
+ llvm::Constant *Protocols,
+ const ConstantVector &Methods) {
+ unsigned Flags = eClassFlags_Meta;
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassTy);
+
+ if (CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden)
+ Flags |= eClassFlags_Hidden;
+
+ std::vector<llvm::Constant*> Values(12);
+ // The isa for the metaclass is the root of the hierarchy.
+ const ObjCInterfaceDecl *Root = ID->getClassInterface();
+ while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
+ Root = Super;
+ Values[ 0] =
+ llvm::ConstantExpr::getBitCast(GetClassName(Root->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ // The super class for the metaclass is emitted as the name of the
+ // super class. The runtime fixes this up to point to the
+ // *metaclass* for the super class.
+ if (ObjCInterfaceDecl *Super = ID->getClassInterface()->getSuperClass()) {
+ Values[ 1] =
+ llvm::ConstantExpr::getBitCast(GetClassName(Super->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ } else {
+ Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
+ }
+ Values[ 2] = GetClassName(ID->getIdentifier());
+ // Version is always 0.
+ Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+ Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
+ Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
+ Values[ 6] = EmitIvarList(ID, true);
+ Values[ 7] =
+ EmitMethodList("\01L_OBJC_CLASS_METHODS_" + ID->getNameAsString(),
+ "__OBJC,__cls_meth,regular,no_dead_strip",
+ Methods);
+ // cache is always NULL.
+ Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
+ Values[ 9] = Protocols;
+ // ivar_layout for metaclass is always NULL.
+ Values[10] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ // The class extension is always unused for metaclasses.
+ Values[11] = llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
+ Values);
+
+ std::string Name("\01L_OBJC_METACLASS_");
+ Name += ID->getNameAsCString();
+
+ // Check for a forward reference.
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+ if (GV) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward metaclass reference has incorrect type.");
+ GV->setLinkage(llvm::GlobalValue::InternalLinkage);
+ GV->setInitializer(Init);
+ } else {
+ GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Init, Name);
+ }
+ GV->setSection("__OBJC,__meta_class,regular,no_dead_strip");
+ GV->setAlignment(4);
+ CGM.AddUsedGlobal(GV);
+
+ return GV;
+}
+
+llvm::Constant *CGObjCMac::EmitMetaClassRef(const ObjCInterfaceDecl *ID) {
+ std::string Name = "\01L_OBJC_METACLASS_" + ID->getNameAsString();
+
+ // FIXME: Should we look these up somewhere other than the module? It's a bit
+ // silly since we only generate these while processing an implementation, so
+ // exactly one pointer would work if we knew when we entered/exited an
+ // implementation block.
+
+ // Check for an existing forward reference.
+ // Previously, a metaclass with internal linkage may have been defined;
+ // pass 'true' as the 2nd argument so it is returned.
+ if (llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name,
+ true)) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward metaclass reference has incorrect type.");
+ return GV;
+ } else {
+ // Generate as an external reference to keep a consistent
+ // module. This will be patched up when we emit the metaclass.
+ return new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ Name);
+ }
+}
+
+llvm::Value *CGObjCMac::EmitSuperClassRef(const ObjCInterfaceDecl *ID) {
+ std::string Name = "\01L_OBJC_CLASS_" + ID->getNameAsString();
+
+ if (llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name,
+ true)) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward class metadata reference has incorrect type.");
+ return GV;
+ } else {
+ return new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ Name);
+ }
+}
+
+/*
+ struct objc_class_ext {
+ uint32_t size;
+ const char *weak_ivar_layout;
+ struct _objc_property_list *properties;
+ };
+*/
+llvm::Constant *
+CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID) {
+ uint64_t Size =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassExtensionTy);
+
+ std::vector<llvm::Constant*> Values(3);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[1] = BuildIvarLayout(ID, false);
+ Values[2] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getName(),
+ ID, ID->getClassInterface(), ObjCTypes);
+
+ // Return null if no extension bits are used.
+ if (Values[1]->isNullValue() && Values[2]->isNullValue())
+ return llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy);
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.ClassExtensionTy, Values);
+ return CreateMetadataVar("\01L_OBJC_CLASSEXT_" + ID->getName(),
+ Init, "__OBJC,__class_ext,regular,no_dead_strip",
+ 4, true);
+}
+
+/*
+ struct objc_ivar {
+ char *ivar_name;
+ char *ivar_type;
+ int ivar_offset;
+ };
+
+ struct objc_ivar_list {
+ int ivar_count;
+ struct objc_ivar list[count];
+ };
+*/
+llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
+ bool ForClass) {
+ std::vector<llvm::Constant*> Ivars, Ivar(3);
+
+ // When emitting the root class GCC emits ivar entries for the
+ // actual class structure. It is not clear if we need to follow this
+ // behavior; for now let's try to get away with not doing it. If so,
+ // the cleanest solution would be to make up an ObjCInterfaceDecl
+ // for the class.
+ if (ForClass)
+ return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy);
+
+ ObjCInterfaceDecl *OID =
+ const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
+
+ llvm::SmallVector<ObjCIvarDecl*, 16> OIvars;
+ CGM.getContext().ShallowCollectObjCIvars(OID, OIvars);
+
+ for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
+ ObjCIvarDecl *IVD = OIvars[i];
+ // Ignore unnamed bit-fields.
+ if (!IVD->getDeclName())
+ continue;
+ Ivar[0] = GetMethodVarName(IVD->getIdentifier());
+ Ivar[1] = GetMethodVarType(IVD);
+ Ivar[2] = llvm::ConstantInt::get(ObjCTypes.IntTy,
+ ComputeIvarBaseOffset(CGM, OID, IVD));
+ Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarTy, Ivar));
+ }
+
+ // Return null for empty list.
+ if (Ivars.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy);
+
+ std::vector<llvm::Constant*> Values(2);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarTy,
+ Ivars.size());
+ Values[1] = llvm::ConstantArray::get(AT, Ivars);
+ llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+
+ llvm::GlobalVariable *GV;
+ if (ForClass)
+ GV = CreateMetadataVar("\01L_OBJC_CLASS_VARIABLES_" + ID->getName(),
+ Init, "__OBJC,__class_vars,regular,no_dead_strip",
+ 4, true);
+ else
+ GV = CreateMetadataVar("\01L_OBJC_INSTANCE_VARIABLES_" + ID->getName(),
+ Init, "__OBJC,__instance_vars,regular,no_dead_strip",
+ 4, true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListPtrTy);
+}
+
+/*
+ struct objc_method {
+ SEL method_name;
+ char *method_types;
+ void *method;
+ };
+
+ struct objc_method_list {
+ struct objc_method_list *obsolete;
+ int count;
+ struct objc_method methods_list[count];
+ };
+*/
+
+/// GetMethodConstant - Return a struct objc_method constant for the
+/// given method if it has been defined. The result is null if the
+/// method has not been defined. The return value has type MethodPtrTy.
+llvm::Constant *CGObjCMac::GetMethodConstant(const ObjCMethodDecl *MD) {
+ llvm::Function *Fn = GetMethodDefinition(MD);
+ if (!Fn)
+ return 0;
+
+ std::vector<llvm::Constant*> Method(3);
+ Method[0] =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ Method[1] = GetMethodVarType(MD);
+ Method[2] = llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy);
+ return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
+}
+
+llvm::Constant *CGObjCMac::EmitMethodList(llvm::Twine Name,
+ const char *Section,
+ const ConstantVector &Methods) {
+ // Return null for empty list.
+ if (Methods.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.MethodListPtrTy);
+
+ std::vector<llvm::Constant*> Values(3);
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
+ Methods.size());
+ Values[2] = llvm::ConstantArray::get(AT, Methods);
+ llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+
+ llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.MethodListPtrTy);
+}
+
+llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) {
+ llvm::SmallString<256> Name;
+ GetNameForMethod(OMD, CD, Name);
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *MethodTy =
+ Types.GetFunctionType(Types.getFunctionInfo(OMD), OMD->isVariadic());
+ llvm::Function *Method =
+ llvm::Function::Create(MethodTy,
+ llvm::GlobalValue::InternalLinkage,
+ Name.str(),
+ &CGM.getModule());
+ MethodDefinitions.insert(std::make_pair(OMD, Method));
+
+ return Method;
+}
+
+llvm::GlobalVariable *
+CGObjCCommonMac::CreateMetadataVar(llvm::Twine Name,
+ llvm::Constant *Init,
+ const char *Section,
+ unsigned Align,
+ bool AddToUsed) {
+ const llvm::Type *Ty = Init->getType();
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Ty, false,
+ llvm::GlobalValue::InternalLinkage, Init, Name);
+ if (Section)
+ GV->setSection(Section);
+ if (Align)
+ GV->setAlignment(Align);
+ if (AddToUsed)
+ CGM.AddUsedGlobal(GV);
+ return GV;
+}
+
+llvm::Function *CGObjCMac::ModuleInitFunction() {
+ // Abuse this interface function as a place to finalize.
+ FinishModule();
+ return NULL;
+}
+
+llvm::Constant *CGObjCMac::GetPropertyGetFunction() {
+ return ObjCTypes.getGetPropertyFn();
+}
+
+llvm::Constant *CGObjCMac::GetPropertySetFunction() {
+ return ObjCTypes.getSetPropertyFn();
+}
+
+llvm::Constant *CGObjCMac::GetCopyStructFunction() {
+ return ObjCTypes.getCopyStructFn();
+}
+
+llvm::Constant *CGObjCMac::EnumerationMutationFunction() {
+ return ObjCTypes.getEnumerationMutationFn();
+}
+
+void CGObjCMac::EmitTryStmt(CodeGenFunction &CGF, const ObjCAtTryStmt &S) {
+ return EmitTryOrSynchronizedStmt(CGF, S);
+}
+
+void CGObjCMac::EmitSynchronizedStmt(CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) {
+ return EmitTryOrSynchronizedStmt(CGF, S);
+}
+
+namespace {
+ struct PerformFragileFinally : EHScopeStack::Cleanup {
+ const Stmt &S;
+ llvm::Value *SyncArgSlot;
+ llvm::Value *CallTryExitVar;
+ llvm::Value *ExceptionData;
+ ObjCTypesHelper &ObjCTypes;
+ PerformFragileFinally(const Stmt *S,
+ llvm::Value *SyncArgSlot,
+ llvm::Value *CallTryExitVar,
+ llvm::Value *ExceptionData,
+ ObjCTypesHelper *ObjCTypes)
+ : S(*S), SyncArgSlot(SyncArgSlot), CallTryExitVar(CallTryExitVar),
+ ExceptionData(ExceptionData), ObjCTypes(*ObjCTypes) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ // Check whether we need to call objc_exception_try_exit.
+ // In optimized code, this branch will always be folded.
+ llvm::BasicBlock *FinallyCallExit =
+ CGF.createBasicBlock("finally.call_exit");
+ llvm::BasicBlock *FinallyNoCallExit =
+ CGF.createBasicBlock("finally.no_call_exit");
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateLoad(CallTryExitVar),
+ FinallyCallExit, FinallyNoCallExit);
+
+ CGF.EmitBlock(FinallyCallExit);
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryExitFn(), ExceptionData)
+ ->setDoesNotThrow();
+
+ CGF.EmitBlock(FinallyNoCallExit);
+
+ if (isa<ObjCAtTryStmt>(S)) {
+ if (const ObjCAtFinallyStmt* FinallyStmt =
+ cast<ObjCAtTryStmt>(S).getFinallyStmt()) {
+ // Save the current cleanup destination in case there's
+ // control flow inside the finally statement.
+ llvm::Value *CurCleanupDest =
+ CGF.Builder.CreateLoad(CGF.getNormalCleanupDestSlot());
+
+ CGF.EmitStmt(FinallyStmt->getFinallyBody());
+
+ if (CGF.HaveInsertPoint()) {
+ CGF.Builder.CreateStore(CurCleanupDest,
+ CGF.getNormalCleanupDestSlot());
+ } else {
+ // Currently, the end of the cleanup must always exist.
+ CGF.EnsureInsertPoint();
+ }
+ }
+ } else {
+ // Emit objc_sync_exit(expr); as finally's sole statement for
+ // @synchronized.
+ llvm::Value *SyncArg = CGF.Builder.CreateLoad(SyncArgSlot);
+ CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg)
+ ->setDoesNotThrow();
+ }
+ }
+ };
+
+ class FragileHazards {
+ CodeGenFunction &CGF;
+ llvm::SmallVector<llvm::Value*, 20> Locals;
+ llvm::DenseSet<llvm::BasicBlock*> BlocksBeforeTry;
+
+ llvm::InlineAsm *ReadHazard;
+ llvm::InlineAsm *WriteHazard;
+
+ llvm::FunctionType *GetAsmFnType();
+
+ void collectLocals();
+ void emitReadHazard(CGBuilderTy &Builder);
+
+ public:
+ FragileHazards(CodeGenFunction &CGF);
+
+ void emitWriteHazard();
+ void emitHazardsInNewBlocks();
+ };
+}
+
+/// Create the fragile-ABI read and write hazards based on the current
+/// state of the function, which is presumed to be immediately prior
+/// to a @try block. These hazards are used to maintain correct
+/// semantics in the face of optimization and the fragile ABI's
+/// cavalier use of setjmp/longjmp.
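+//
+// Illustrative note (not part of the original code): for a function with
+// three interesting allocas %x, %y and %z, the hazards built below are empty
+// inline-asm calls whose only effect is their constraints -- the read hazard
+// uses the constraint string "*m,*m,*m" and the write hazard "=*m,=*m,=*m",
+// each taking the three allocas as operands, so the optimizer must assume
+// the locals are read (or written) at the point the hazard is emitted.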
+FragileHazards::FragileHazards(CodeGenFunction &CGF) : CGF(CGF) {
+ collectLocals();
+
+ if (Locals.empty()) return;
+
+ // Collect all the blocks in the function.
+ for (llvm::Function::iterator
+ I = CGF.CurFn->begin(), E = CGF.CurFn->end(); I != E; ++I)
+ BlocksBeforeTry.insert(&*I);
+
+ llvm::FunctionType *AsmFnTy = GetAsmFnType();
+
+ // Create a read hazard for the allocas. This inhibits dead-store
+ // optimizations and forces the values to memory. This hazard is
+ // inserted before any 'throwing' calls in the protected scope to
+ // reflect the possibility that the variables might be read from the
+ // catch block if the call throws.
+ {
+ std::string Constraint;
+ for (unsigned I = 0, E = Locals.size(); I != E; ++I) {
+ if (I) Constraint += ',';
+ Constraint += "*m";
+ }
+
+ ReadHazard = llvm::InlineAsm::get(AsmFnTy, "", Constraint, true, false);
+ }
+
+ // Create a write hazard for the allocas. This inhibits folding
+ // loads across the hazard. This hazard is inserted at the
+ // beginning of the catch path to reflect the possibility that the
+ // variables might have been written within the protected scope.
+ {
+ std::string Constraint;
+ for (unsigned I = 0, E = Locals.size(); I != E; ++I) {
+ if (I) Constraint += ',';
+ Constraint += "=*m";
+ }
+
+ WriteHazard = llvm::InlineAsm::get(AsmFnTy, "", Constraint, true, false);
+ }
+}
+
+/// Emit a write hazard at the current location.
+void FragileHazards::emitWriteHazard() {
+ if (Locals.empty()) return;
+
+ CGF.Builder.CreateCall(WriteHazard, Locals.begin(), Locals.end())
+ ->setDoesNotThrow();
+}
+
+void FragileHazards::emitReadHazard(CGBuilderTy &Builder) {
+ assert(!Locals.empty());
+ Builder.CreateCall(ReadHazard, Locals.begin(), Locals.end())
+ ->setDoesNotThrow();
+}
+
+/// Emit read hazards in all the protected blocks, i.e. all the blocks
+/// which have been inserted since the beginning of the try.
+void FragileHazards::emitHazardsInNewBlocks() {
+ if (Locals.empty()) return;
+
+ CGBuilderTy Builder(CGF.getLLVMContext());
+
+ // Iterate through all blocks, skipping those prior to the try.
+ for (llvm::Function::iterator
+ FI = CGF.CurFn->begin(), FE = CGF.CurFn->end(); FI != FE; ++FI) {
+ llvm::BasicBlock &BB = *FI;
+ if (BlocksBeforeTry.count(&BB)) continue;
+
+ // Walk through all the calls in the block.
+ for (llvm::BasicBlock::iterator
+ BI = BB.begin(), BE = BB.end(); BI != BE; ++BI) {
+ llvm::Instruction &I = *BI;
+
+      // Ignore everything other than non-intrinsic calls and invokes; those
+      // are the only instructions that can possibly call longjmp.
+ if (!isa<llvm::CallInst>(I) && !isa<llvm::InvokeInst>(I)) continue;
+ if (isa<llvm::IntrinsicInst>(I))
+ continue;
+
+ // Ignore call sites marked nounwind. This may be questionable,
+ // since 'nounwind' doesn't necessarily mean 'does not call longjmp'.
+ llvm::CallSite CS(&I);
+ if (CS.doesNotThrow()) continue;
+
+ // Insert a read hazard before the call. This will ensure that
+ // any writes to the locals are performed before making the
+ // call. If the call throws, then this is sufficient to
+ // guarantee correctness as long as it doesn't also write to any
+ // locals.
+ Builder.SetInsertPoint(&BB, BI);
+ emitReadHazard(Builder);
+ }
+ }
+}
+
+static void addIfPresent(llvm::DenseSet<llvm::Value*> &S, llvm::Value *V) {
+ if (V) S.insert(V);
+}
+
+void FragileHazards::collectLocals() {
+ // Compute a set of allocas to ignore.
+ llvm::DenseSet<llvm::Value*> AllocasToIgnore;
+ addIfPresent(AllocasToIgnore, CGF.ReturnValue);
+ addIfPresent(AllocasToIgnore, CGF.NormalCleanupDest);
+ addIfPresent(AllocasToIgnore, CGF.EHCleanupDest);
+
+ // Collect all the allocas currently in the function. This is
+ // probably way too aggressive.
+ llvm::BasicBlock &Entry = CGF.CurFn->getEntryBlock();
+ for (llvm::BasicBlock::iterator
+ I = Entry.begin(), E = Entry.end(); I != E; ++I)
+ if (isa<llvm::AllocaInst>(*I) && !AllocasToIgnore.count(&*I))
+ Locals.push_back(&*I);
+}
+
+llvm::FunctionType *FragileHazards::GetAsmFnType() {
+ std::vector<const llvm::Type *> Tys(Locals.size());
+ for (unsigned I = 0, E = Locals.size(); I != E; ++I)
+ Tys[I] = Locals[I]->getType();
+ return llvm::FunctionType::get(CGF.Builder.getVoidTy(), Tys, false);
+}
+
+/*
+
+ Objective-C setjmp-longjmp (sjlj) Exception Handling
+ --
+
+ A catch buffer is a setjmp buffer plus:
+ - a pointer to the exception that was caught
+ - a pointer to the previous exception data buffer
+ - two pointers of reserved storage
+ Therefore catch buffers form a stack, with a pointer to the top
+ of the stack kept in thread-local storage.
+
+ objc_exception_try_enter pushes a catch buffer onto the EH stack.
+ objc_exception_try_exit pops the given catch buffer, which is
+ required to be the top of the EH stack.
+ objc_exception_throw pops the top of the EH stack, writes the
+ thrown exception into the appropriate field, and longjmps
+ to the setjmp buffer. It crashes the process (with a printf
+ and an abort()) if there are no catch buffers on the stack.
+ objc_exception_extract just reads the exception pointer out of the
+ catch buffer.
+
+ There's no reason an implementation couldn't use a light-weight
+ setjmp here --- something like __builtin_setjmp, but API-compatible
+ with the heavyweight setjmp. This will be more important if we ever
+ want to implement correct ObjC/C++ exception interactions for the
+ fragile ABI.
+
+ Note that for this use of setjmp/longjmp to be correct, we may need
+ to mark some local variables volatile: if a non-volatile local
+ variable is modified between the setjmp and the longjmp, it has
+ indeterminate value. For the purposes of LLVM IR, it may be
+ sufficient to make loads and stores within the @try (to variables
+ declared outside the @try) volatile. This is necessary for
+ optimized correctness, but is not currently being done; this is
+ being tracked as rdar://problem/8160285
+
+ The basic framework for a @try-catch-finally is as follows:
+ {
+ objc_exception_data d;
+ id _rethrow = null;
+ bool _call_try_exit = true;
+
+ objc_exception_try_enter(&d);
+ if (!setjmp(d.jmp_buf)) {
+ ... try body ...
+ } else {
+ // exception path
+ id _caught = objc_exception_extract(&d);
+
+ // enter new try scope for handlers
+ if (!setjmp(d.jmp_buf)) {
+ ... match exception and execute catch blocks ...
+
+ // fell off end, rethrow.
+ _rethrow = _caught;
+ ... jump-through-finally to finally_rethrow ...
+ } else {
+ // exception in catch block
+ _rethrow = objc_exception_extract(&d);
+ _call_try_exit = false;
+ ... jump-through-finally to finally_rethrow ...
+ }
+ }
+ ... jump-through-finally to finally_end ...
+
+ finally:
+ if (_call_try_exit)
+ objc_exception_try_exit(&d);
+
+ ... finally block ....
+ ... dispatch to finally destination ...
+
+ finally_rethrow:
+ objc_exception_throw(_rethrow);
+
+ finally_end:
+ }
+
+ This framework differs slightly from the one gcc uses, in that gcc
+ uses _rethrow to determine if objc_exception_try_exit should be called
+ and if the object should be rethrown. This breaks in the face of
+ throwing nil and introduces unnecessary branches.
+
+ We specialize this framework for a few particular circumstances:
+
+ - If there are no catch blocks, then we avoid emitting the second
+ exception handling context.
+
+ - If there is a catch-all catch block (i.e. @catch(...) or @catch(id
+ e)) we avoid emitting the code to rethrow an uncaught exception.
+
+ - FIXME: If there is no @finally block we can do a few more
+ simplifications.
+
+ Rethrows and Jumps-Through-Finally
+ --
+
+ '@throw;' is supported by pushing the currently-caught exception
+ onto ObjCEHStack while the @catch blocks are emitted.
+
+ Branches through the @finally block are handled with an ordinary
+ normal cleanup. We do not register an EH cleanup; fragile-ABI ObjC
+ exceptions are not compatible with C++ exceptions, and this is
+ hardly the only place where this will go wrong.
+
+ @synchronized(expr) { stmt; } is emitted as if it were:
+ id synch_value = expr;
+ objc_sync_enter(synch_value);
+ @try { stmt; } @finally { objc_sync_exit(synch_value); }
+*/
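+
+// A minimal sketch (assumed source, not from the original comment) of how a
+// @try with one typed @catch and a @finally maps onto the blocks created
+// below:
+//
+//   @try { f(); } @catch (Foo *e) { g(e); } @finally { h(); }
+//
+//   "try"             - setjmp returned 0: emit the call to f()
+//   "try.handler"     - setjmp returned nonzero: objc_exception_extract,
+//                       then re-enter a handler-scope try (second setjmp)
+//   "catch"           - objc_exception_match against the Foo class
+//   "match"           - bind 'e', emit g(e), branch through the cleanup to
+//                       "finally.end"
+//   "catch.next"      - no handler matched: branch through the cleanup to
+//                       "finally.rethrow"
+//   "catch_for_catch" - an exception was thrown inside a handler
+//   cleanup           - conditionally calls objc_exception_try_exit and
+//                       emits h()
+//   "finally.rethrow" - objc_exception_throw of the pending exception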
+
+void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S) {
+ bool isTry = isa<ObjCAtTryStmt>(S);
+
+ // A destination for the fall-through edges of the catch handlers to
+ // jump to.
+ CodeGenFunction::JumpDest FinallyEnd =
+ CGF.getJumpDestInCurrentScope("finally.end");
+
+ // A destination for the rethrow edge of the catch handlers to jump
+ // to.
+ CodeGenFunction::JumpDest FinallyRethrow =
+ CGF.getJumpDestInCurrentScope("finally.rethrow");
+
+ // For @synchronized, call objc_sync_enter(sync.expr). The
+ // evaluation of the expression must occur before we enter the
+ // @synchronized. We can't avoid a temp here because we need the
+ // value to be preserved. If the backend ever does liveness
+ // correctly after setjmp, this will be unnecessary.
+ llvm::Value *SyncArgSlot = 0;
+ if (!isTry) {
+ llvm::Value *SyncArg =
+ CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg)
+ ->setDoesNotThrow();
+
+ SyncArgSlot = CGF.CreateTempAlloca(SyncArg->getType(), "sync.arg");
+ CGF.Builder.CreateStore(SyncArg, SyncArgSlot);
+ }
+
+ // Allocate memory for the setjmp buffer. This needs to be kept
+ // live throughout the try and catch blocks.
+ llvm::Value *ExceptionData = CGF.CreateTempAlloca(ObjCTypes.ExceptionDataTy,
+ "exceptiondata.ptr");
+
+ // Create the fragile hazards. Note that this will not capture any
+ // of the allocas required for exception processing, but will
+ // capture the current basic block (which extends all the way to the
+ // setjmp call) as "before the @try".
+ FragileHazards Hazards(CGF);
+
+ // Create a flag indicating whether the cleanup needs to call
+ // objc_exception_try_exit. This is true except when
+ // - no catches match and we're branching through the cleanup
+ // just to rethrow the exception, or
+ // - a catch matched and we're falling out of the catch handler.
+ // The setjmp-safety rule here is that we should always store to this
+ // variable in a place that dominates the branch through the cleanup
+ // without passing through any setjmps.
+ llvm::Value *CallTryExitVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(),
+ "_call_try_exit");
+
+ // Push a normal cleanup to leave the try scope.
+ CGF.EHStack.pushCleanup<PerformFragileFinally>(NormalCleanup, &S,
+ SyncArgSlot,
+ CallTryExitVar,
+ ExceptionData,
+ &ObjCTypes);
+
+ // Enter a try block:
+ // - Call objc_exception_try_enter to push ExceptionData on top of
+ // the EH stack.
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData)
+ ->setDoesNotThrow();
+
+ // - Call setjmp on the exception data buffer.
+ llvm::Constant *Zero = llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0);
+ llvm::Value *GEPIndexes[] = { Zero, Zero, Zero };
+ llvm::Value *SetJmpBuffer =
+ CGF.Builder.CreateGEP(ExceptionData, GEPIndexes, GEPIndexes+3, "setjmp_buffer");
+ llvm::CallInst *SetJmpResult =
+ CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer, "setjmp_result");
+ SetJmpResult->setDoesNotThrow();
+
+ // If setjmp returned 0, enter the protected block; otherwise,
+ // branch to the handler.
+ llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
+ llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
+ llvm::Value *DidCatch =
+ CGF.Builder.CreateIsNotNull(SetJmpResult, "did_catch_exception");
+ CGF.Builder.CreateCondBr(DidCatch, TryHandler, TryBlock);
+
+ // Emit the protected block.
+ CGF.EmitBlock(TryBlock);
+ CGF.Builder.CreateStore(CGF.Builder.getTrue(), CallTryExitVar);
+ CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
+ : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
+
+ CGBuilderTy::InsertPoint TryFallthroughIP = CGF.Builder.saveAndClearIP();
+
+ // Emit the exception handler block.
+ CGF.EmitBlock(TryHandler);
+
+ // Don't optimize loads of the in-scope locals across this point.
+ Hazards.emitWriteHazard();
+
+ // For a @synchronized (or a @try with no catches), just branch
+ // through the cleanup to the rethrow block.
+ if (!isTry || !cast<ObjCAtTryStmt>(S).getNumCatchStmts()) {
+ // Tell the cleanup not to re-pop the exit.
+ CGF.Builder.CreateStore(CGF.Builder.getFalse(), CallTryExitVar);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+ // Otherwise, we have to match against the caught exceptions.
+ } else {
+ // Retrieve the exception object. We may emit multiple blocks but
+ // nothing can cross this so the value is already in SSA form.
+ llvm::CallInst *Caught =
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData, "caught");
+ Caught->setDoesNotThrow();
+
+ // Push the exception to rethrow onto the EH value stack for the
+ // benefit of any @throws in the handlers.
+ CGF.ObjCEHValueStack.push_back(Caught);
+
+ const ObjCAtTryStmt* AtTryStmt = cast<ObjCAtTryStmt>(&S);
+
+ bool HasFinally = (AtTryStmt->getFinallyStmt() != 0);
+
+ llvm::BasicBlock *CatchBlock = 0;
+ llvm::BasicBlock *CatchHandler = 0;
+ if (HasFinally) {
+ // Enter a new exception try block (in case a @catch block
+ // throws an exception).
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData)
+ ->setDoesNotThrow();
+
+ llvm::CallInst *SetJmpResult =
+ CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer,
+ "setjmp.result");
+ SetJmpResult->setDoesNotThrow();
+
+ llvm::Value *Threw =
+ CGF.Builder.CreateIsNotNull(SetJmpResult, "did_catch_exception");
+
+ CatchBlock = CGF.createBasicBlock("catch");
+ CatchHandler = CGF.createBasicBlock("catch_for_catch");
+ CGF.Builder.CreateCondBr(Threw, CatchHandler, CatchBlock);
+
+ CGF.EmitBlock(CatchBlock);
+ }
+
+ CGF.Builder.CreateStore(CGF.Builder.getInt1(HasFinally), CallTryExitVar);
+
+ // Handle catch list. As a special case we check if everything is
+ // matched and avoid generating code for falling off the end if
+ // so.
+ bool AllMatched = false;
+ for (unsigned I = 0, N = AtTryStmt->getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = AtTryStmt->getCatchStmt(I);
+
+ const VarDecl *CatchParam = CatchStmt->getCatchParamDecl();
+ const ObjCObjectPointerType *OPT = 0;
+
+ // catch(...) always matches.
+ if (!CatchParam) {
+ AllMatched = true;
+ } else {
+ OPT = CatchParam->getType()->getAs<ObjCObjectPointerType>();
+
+ // catch(id e) always matches under this ABI, since only
+ // ObjC exceptions end up here in the first place.
+ // FIXME: For the time being we also match id<X>; this should
+ // be rejected by Sema instead.
+ if (OPT && (OPT->isObjCIdType() || OPT->isObjCQualifiedIdType()))
+ AllMatched = true;
+ }
+
+ // If this is a catch-all, we don't need to test anything.
+ if (AllMatched) {
+ CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF);
+
+ if (CatchParam) {
+ CGF.EmitLocalBlockVarDecl(*CatchParam);
+ assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
+
+ // These types work out because ConvertType(id) == i8*.
+ CGF.Builder.CreateStore(Caught, CGF.GetAddrOfLocalVar(CatchParam));
+ }
+
+ CGF.EmitStmt(CatchStmt->getCatchBody());
+
+ // The scope of the catch variable ends right here.
+ CatchVarCleanups.ForceCleanup();
+
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+ break;
+ }
+
+ assert(OPT && "Unexpected non-object pointer type in @catch");
+ const ObjCObjectType *ObjTy = OPT->getObjectType();
+
+ // FIXME: @catch (Class c) ?
+ ObjCInterfaceDecl *IDecl = ObjTy->getInterface();
+ assert(IDecl && "Catch parameter must have Objective-C type!");
+
+ // Check if the @catch block matches the exception object.
+ llvm::Value *Class = EmitClassRef(CGF.Builder, IDecl);
+
+ llvm::CallInst *Match =
+ CGF.Builder.CreateCall2(ObjCTypes.getExceptionMatchFn(),
+ Class, Caught, "match");
+ Match->setDoesNotThrow();
+
+ llvm::BasicBlock *MatchedBlock = CGF.createBasicBlock("match");
+ llvm::BasicBlock *NextCatchBlock = CGF.createBasicBlock("catch.next");
+
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(Match, "matched"),
+ MatchedBlock, NextCatchBlock);
+
+ // Emit the @catch block.
+ CGF.EmitBlock(MatchedBlock);
+
+ // Collect any cleanups for the catch variable. The scope lasts until
+ // the end of the catch body.
+ CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF);
+
+ CGF.EmitLocalBlockVarDecl(*CatchParam);
+ assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
+
+ // Initialize the catch variable.
+ llvm::Value *Tmp =
+ CGF.Builder.CreateBitCast(Caught,
+ CGF.ConvertType(CatchParam->getType()),
+ "tmp");
+ CGF.Builder.CreateStore(Tmp, CGF.GetAddrOfLocalVar(CatchParam));
+
+ CGF.EmitStmt(CatchStmt->getCatchBody());
+
+ // We're done with the catch variable.
+ CatchVarCleanups.ForceCleanup();
+
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ CGF.EmitBlock(NextCatchBlock);
+ }
+
+ CGF.ObjCEHValueStack.pop_back();
+
+ // If nothing wanted anything to do with the caught exception,
+ // kill the extract call.
+ if (Caught->use_empty())
+ Caught->eraseFromParent();
+
+ if (!AllMatched)
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+ if (HasFinally) {
+ // Emit the exception handler for the @catch blocks.
+ CGF.EmitBlock(CatchHandler);
+
+ // In theory we might now need a write hazard, but actually it's
+ // unnecessary because there's no local-accessing code between
+ // the try's write hazard and here.
+ //Hazards.emitWriteHazard();
+
+ // Don't pop the catch handler; the throw already did.
+ CGF.Builder.CreateStore(CGF.Builder.getFalse(), CallTryExitVar);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ }
+ }
+
+ // Insert read hazards as required in the new blocks.
+ Hazards.emitHazardsInNewBlocks();
+
+ // Pop the cleanup.
+ CGF.Builder.restoreIP(TryFallthroughIP);
+ if (CGF.HaveInsertPoint())
+ CGF.Builder.CreateStore(CGF.Builder.getTrue(), CallTryExitVar);
+ CGF.PopCleanupBlock();
+ CGF.EmitBlock(FinallyEnd.getBlock(), true);
+
+ // Emit the rethrow block.
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
+ CGF.EmitBlock(FinallyRethrow.getBlock(), true);
+ if (CGF.HaveInsertPoint()) {
+ // Just look in the buffer for the exception to throw.
+ llvm::CallInst *Caught =
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData);
+ Caught->setDoesNotThrow();
+
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), Caught)
+ ->setDoesNotThrow();
+ CGF.Builder.CreateUnreachable();
+ }
+
+ CGF.Builder.restoreIP(SavedIP);
+}
+
+void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) {
+ llvm::Value *ExceptionAsObject;
+
+ if (const Expr *ThrowExpr = S.getThrowExpr()) {
+ llvm::Value *Exception = CGF.EmitScalarExpr(ThrowExpr);
+ ExceptionAsObject =
+ CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy, "tmp");
+ } else {
+ assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+ "Unexpected rethrow outside @catch block.");
+ ExceptionAsObject = CGF.ObjCEHValueStack.back();
+ }
+
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject)
+ ->setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ CGF.Builder.ClearInsertionPoint();
+}
+
+/// EmitObjCWeakRead - Code gen for loading value of a __weak
+/// object: objc_read_weak (id *src)
+///
+llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj) {
+ const llvm::Type* DestTy =
+ cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
+ AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj,
+ ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *read_weak = CGF.Builder.CreateCall(ObjCTypes.getGcReadWeakFn(),
+ AddrWeakObj, "weakread");
+ read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
+ return read_weak;
+}
+
+/// EmitObjCWeakAssign - Code gen for assigning to a __weak object.
+/// objc_assign_weak (id src, id *dst)
+///
+void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignWeakFn(),
+ src, dst, "weakassign");
+ return;
+}
+
+/// EmitObjCGlobalAssign - Code gen for assigning to a __strong object.
+/// objc_assign_global (id src, id *dst)
+///
+void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst,
+ bool threadlocal) {
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ if (!threadlocal)
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignGlobalFn(),
+ src, dst, "globalassign");
+ else
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignThreadLocalFn(),
+ src, dst, "threadlocalassign");
+ return;
+}
+
+/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
+/// objc_assign_ivar (id src, id *dst, ptrdiff_t ivaroffset)
+///
+void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst,
+ llvm::Value *ivarOffset) {
+ assert(ivarOffset && "EmitObjCIvarAssign - ivarOffset is NULL");
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall3(ObjCTypes.getGcAssignIvarFn(),
+ src, dst, ivarOffset);
+ return;
+}
+
+/// EmitObjCStrongCastAssign - Code gen for assigning to a __strong cast object.
+/// objc_assign_strongCast (id src, id *dst)
+///
+void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignStrongCastFn(),
+ src, dst, "weakassign");
+ return;
+}
+
+void CGObjCMac::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ llvm::Value *size) {
+ SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
+ DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
+ CGF.Builder.CreateCall3(ObjCTypes.GcMemmoveCollectableFn(),
+ DestPtr, SrcPtr, size);
+ return;
+}
+
+/// EmitObjCValueForIvar - Code Gen for ivar reference.
+///
+LValue CGObjCMac::EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ const ObjCInterfaceDecl *ID =
+ ObjectTy->getAs<ObjCObjectType>()->getInterface();
+ return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+ EmitIvarOffset(CGF, ID, Ivar));
+}
+
+llvm::Value *CGObjCMac::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ uint64_t Offset = ComputeIvarBaseOffset(CGM, Interface, Ivar);
+ return llvm::ConstantInt::get(
+ CGM.getTypes().ConvertType(CGM.getContext().LongTy),
+ Offset);
+}
+
+/* *** Private Interface *** */
+
+/// EmitImageInfo - Emit the image info marker used to encode some module
+/// level information.
+///
+/// See: <rdr://4810609&4810587&4810587>
+/// struct IMAGE_INFO {
+/// unsigned version;
+/// unsigned flags;
+/// };
+enum ImageInfoFlags {
+ eImageInfo_FixAndContinue = (1 << 0),
+ eImageInfo_GarbageCollected = (1 << 1),
+ eImageInfo_GCOnly = (1 << 2),
+ eImageInfo_OptimizedByDyld = (1 << 3), // FIXME: When is this set.
+
+ // A flag indicating that the module has no instances of a @synthesize of a
+ // superclass variable. <rdar://problem/6803242>
+ eImageInfo_CorrectedSynthesize = (1 << 4)
+};
+
+void CGObjCCommonMac::EmitImageInfo() {
+ unsigned version = 0; // Version is unused?
+ unsigned flags = 0;
+
+ // FIXME: Fix and continue?
+ if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC)
+ flags |= eImageInfo_GarbageCollected;
+ if (CGM.getLangOptions().getGCMode() == LangOptions::GCOnly)
+ flags |= eImageInfo_GCOnly;
+
+ // We never allow @synthesize of a superclass property.
+ flags |= eImageInfo_CorrectedSynthesize;
+
+ const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
+
+ // Emitted as int[2];
+ llvm::Constant *values[2] = {
+ llvm::ConstantInt::get(Int32Ty, version),
+ llvm::ConstantInt::get(Int32Ty, flags)
+ };
+ llvm::ArrayType *AT = llvm::ArrayType::get(Int32Ty, 2);
+
+ const char *Section;
+ if (ObjCABI == 1)
+ Section = "__OBJC, __image_info,regular";
+ else
+ Section = "__DATA, __objc_imageinfo, regular, no_dead_strip";
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar("\01L_OBJC_IMAGE_INFO",
+ llvm::ConstantArray::get(AT, values, 2),
+ Section,
+ 0,
+ true);
+ GV->setConstant(true);
+}
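+
+// Worked example (illustrative, not from the original source): compiling for
+// the fragile ABI (ObjCABI == 1) with -fobjc-gc sets GarbageCollected (1<<1)
+// and CorrectedSynthesize (1<<4), so the emitted metadata is the constant
+// int[2] {0, 0x12} placed in "__OBJC, __image_info,regular".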
+
+
+// struct objc_module {
+// unsigned long version;
+// unsigned long size;
+// const char *name;
+// Symtab symtab;
+// };
+
+// FIXME: Get from somewhere
+static const int ModuleVersion = 7;
+
+void CGObjCMac::EmitModuleInfo() {
+ uint64_t Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ModuleTy);
+
+ std::vector<llvm::Constant*> Values(4);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, ModuleVersion);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
+ // This used to be the filename, now it is unused. <rdr://4327263>
+ Values[2] = GetClassName(&CGM.getContext().Idents.get(""));
+ Values[3] = EmitModuleSymbols();
+ CreateMetadataVar("\01L_OBJC_MODULES",
+ llvm::ConstantStruct::get(ObjCTypes.ModuleTy, Values),
+ "__OBJC,__module_info,regular,no_dead_strip",
+ 4, true);
+}
+
+llvm::Constant *CGObjCMac::EmitModuleSymbols() {
+ unsigned NumClasses = DefinedClasses.size();
+ unsigned NumCategories = DefinedCategories.size();
+
+ // Return null if no symbols were defined.
+ if (!NumClasses && !NumCategories)
+ return llvm::Constant::getNullValue(ObjCTypes.SymtabPtrTy);
+
+ std::vector<llvm::Constant*> Values(5);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+ Values[1] = llvm::Constant::getNullValue(ObjCTypes.SelectorPtrTy);
+ Values[2] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumClasses);
+ Values[3] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumCategories);
+
+ // The runtime expects exactly the list of defined classes followed
+ // by the list of defined categories, in a single array.
+ std::vector<llvm::Constant*> Symbols(NumClasses + NumCategories);
+ for (unsigned i=0; i<NumClasses; i++)
+ Symbols[i] = llvm::ConstantExpr::getBitCast(DefinedClasses[i],
+ ObjCTypes.Int8PtrTy);
+ for (unsigned i=0; i<NumCategories; i++)
+ Symbols[NumClasses + i] =
+ llvm::ConstantExpr::getBitCast(DefinedCategories[i],
+ ObjCTypes.Int8PtrTy);
+
+ Values[4] =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
+ NumClasses + NumCategories),
+ Symbols);
+
+ llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar("\01L_OBJC_SYMBOLS", Init,
+ "__OBJC,__symbols,regular,no_dead_strip",
+ 4, true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.SymtabPtrTy);
+}
+
+llvm::Value *CGObjCMac::EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ LazySymbols.insert(ID->getIdentifier());
+
+ llvm::GlobalVariable *&Entry = ClassReferences[ID->getIdentifier()];
+
+ if (!Entry) {
+ llvm::Constant *Casted =
+ llvm::ConstantExpr::getBitCast(GetClassName(ID->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ Entry =
+ CreateMetadataVar("\01L_OBJC_CLASS_REFERENCES_", Casted,
+ "__OBJC,__cls_refs,literal_pointers,no_dead_strip",
+ 4, true);
+ }
+
+ return Builder.CreateLoad(Entry, "tmp");
+}
+
+llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lvalue) {
+ llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
+
+ if (!Entry) {
+ llvm::Constant *Casted =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
+ ObjCTypes.SelectorPtrTy);
+ Entry =
+ CreateMetadataVar("\01L_OBJC_SELECTOR_REFERENCES_", Casted,
+ "__OBJC,__message_refs,literal_pointers,no_dead_strip",
+ 4, true);
+ }
+
+ if (lvalue)
+ return Entry;
+ return Builder.CreateLoad(Entry, "tmp");
+}
+
+llvm::Constant *CGObjCCommonMac::GetClassName(IdentifierInfo *Ident) {
+ llvm::GlobalVariable *&Entry = ClassNames[Ident];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
+ llvm::ConstantArray::get(VMContext,
+ Ident->getNameStart()),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+llvm::Function *CGObjCCommonMac::GetMethodDefinition(const ObjCMethodDecl *MD) {
+ llvm::DenseMap<const ObjCMethodDecl*, llvm::Function*>::iterator
+ I = MethodDefinitions.find(MD);
+ if (I != MethodDefinitions.end())
+ return I->second;
+
+ if (MD->hasBody() && MD->getPCHLevel() > 0) {
+ // MD isn't emitted yet because it comes from PCH.
+ CGM.EmitTopLevelDecl(const_cast<ObjCMethodDecl*>(MD));
+ assert(MethodDefinitions[MD] && "EmitTopLevelDecl didn't emit the method!");
+ return MethodDefinitions[MD];
+ }
+
+ return NULL;
+}
+
+/// GetIvarLayoutName - Returns a unique constant for the given
+/// ivar layout bitmap.
+llvm::Constant *CGObjCCommonMac::GetIvarLayoutName(IdentifierInfo *Ident,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ return llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+}
+
+void CGObjCCommonMac::BuildAggrIvarRecordLayout(const RecordType *RT,
+ unsigned int BytePos,
+ bool ForStrongLayout,
+ bool &HasUnion) {
+ const RecordDecl *RD = RT->getDecl();
+ // FIXME - Use iterator.
+ llvm::SmallVector<FieldDecl*, 16> Fields(RD->field_begin(), RD->field_end());
+ const llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
+ const llvm::StructLayout *RecLayout =
+ CGM.getTargetData().getStructLayout(cast<llvm::StructType>(Ty));
+
+ BuildAggrIvarLayout(0, RecLayout, RD, Fields, BytePos,
+ ForStrongLayout, HasUnion);
+}
+
+void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
+ const llvm::StructLayout *Layout,
+ const RecordDecl *RD,
+ const llvm::SmallVectorImpl<FieldDecl*> &RecFields,
+ unsigned int BytePos, bool ForStrongLayout,
+ bool &HasUnion) {
+ bool IsUnion = (RD && RD->isUnion());
+ uint64_t MaxUnionIvarSize = 0;
+ uint64_t MaxSkippedUnionIvarSize = 0;
+ FieldDecl *MaxField = 0;
+ FieldDecl *MaxSkippedField = 0;
+ FieldDecl *LastFieldBitfield = 0;
+ uint64_t MaxFieldOffset = 0;
+ uint64_t MaxSkippedFieldOffset = 0;
+ uint64_t LastBitfieldOffset = 0;
+
+ if (RecFields.empty())
+ return;
+ unsigned WordSizeInBits = CGM.getContext().Target.getPointerWidth(0);
+ unsigned ByteSizeInBits = CGM.getContext().Target.getCharWidth();
+
+ for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
+ FieldDecl *Field = RecFields[i];
+ uint64_t FieldOffset;
+ if (RD) {
+ // Note that 'i' here is actually the field index inside RD of Field,
+ // although this dependency is hidden.
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ FieldOffset = RL.getFieldOffset(i) / 8;
+ } else
+ FieldOffset = ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(Field));
+
+    // Skip over unnamed fields and bitfields.
+ if (!Field->getIdentifier() || Field->isBitField()) {
+ LastFieldBitfield = Field;
+ LastBitfieldOffset = FieldOffset;
+ continue;
+ }
+
+ LastFieldBitfield = 0;
+ QualType FQT = Field->getType();
+ if (FQT->isRecordType() || FQT->isUnionType()) {
+ if (FQT->isUnionType())
+ HasUnion = true;
+
+ BuildAggrIvarRecordLayout(FQT->getAs<RecordType>(),
+ BytePos + FieldOffset,
+ ForStrongLayout, HasUnion);
+ continue;
+ }
+
+ if (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
+ const ConstantArrayType *CArray =
+ dyn_cast_or_null<ConstantArrayType>(Array);
+      assert(CArray && "only array with known element size is supported");
+      uint64_t ElCount = CArray->getSize().getZExtValue();
+ FQT = CArray->getElementType();
+ while (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
+ const ConstantArrayType *CArray =
+ dyn_cast_or_null<ConstantArrayType>(Array);
+ ElCount *= CArray->getSize().getZExtValue();
+ FQT = CArray->getElementType();
+ }
+
+ assert(!FQT->isUnionType() &&
+ "layout for array of unions not supported");
+ if (FQT->isRecordType()) {
+ int OldIndex = IvarsInfo.size() - 1;
+ int OldSkIndex = SkipIvars.size() -1;
+
+ const RecordType *RT = FQT->getAs<RecordType>();
+ BuildAggrIvarRecordLayout(RT, BytePos + FieldOffset,
+ ForStrongLayout, HasUnion);
+
+ // Replicate layout information for each array element. Note that
+ // one element is already done.
+ uint64_t ElIx = 1;
+ for (int FirstIndex = IvarsInfo.size() - 1,
+ FirstSkIndex = SkipIvars.size() - 1 ;ElIx < ElCount; ElIx++) {
+ uint64_t Size = CGM.getContext().getTypeSize(RT)/ByteSizeInBits;
+ for (int i = OldIndex+1; i <= FirstIndex; ++i)
+ IvarsInfo.push_back(GC_IVAR(IvarsInfo[i].ivar_bytepos + Size*ElIx,
+ IvarsInfo[i].ivar_size));
+ for (int i = OldSkIndex+1; i <= FirstSkIndex; ++i)
+ SkipIvars.push_back(GC_IVAR(SkipIvars[i].ivar_bytepos + Size*ElIx,
+ SkipIvars[i].ivar_size));
+ }
+ continue;
+ }
+ }
+    // At this point, we are done with records, unions, and arrays thereof.
+    // For other arrays we are down to their element type.
+ Qualifiers::GC GCAttr = GetGCAttrTypeForType(CGM.getContext(), FQT);
+
+ unsigned FieldSize = CGM.getContext().getTypeSize(Field->getType());
+ if ((ForStrongLayout && GCAttr == Qualifiers::Strong)
+ || (!ForStrongLayout && GCAttr == Qualifiers::Weak)) {
+ if (IsUnion) {
+ uint64_t UnionIvarSize = FieldSize / WordSizeInBits;
+ if (UnionIvarSize > MaxUnionIvarSize) {
+ MaxUnionIvarSize = UnionIvarSize;
+ MaxField = Field;
+ MaxFieldOffset = FieldOffset;
+ }
+ } else {
+ IvarsInfo.push_back(GC_IVAR(BytePos + FieldOffset,
+ FieldSize / WordSizeInBits));
+ }
+ } else if ((ForStrongLayout &&
+ (GCAttr == Qualifiers::GCNone || GCAttr == Qualifiers::Weak))
+ || (!ForStrongLayout && GCAttr != Qualifiers::Weak)) {
+ if (IsUnion) {
+        // FIXME: Why the asymmetry? We divide by word size in bits on the
+        // other side.
+ uint64_t UnionIvarSize = FieldSize;
+ if (UnionIvarSize > MaxSkippedUnionIvarSize) {
+ MaxSkippedUnionIvarSize = UnionIvarSize;
+ MaxSkippedField = Field;
+ MaxSkippedFieldOffset = FieldOffset;
+ }
+ } else {
+ // FIXME: Why the asymmetry, we divide by byte size in bits here?
+ SkipIvars.push_back(GC_IVAR(BytePos + FieldOffset,
+ FieldSize / ByteSizeInBits));
+ }
+ }
+ }
+
+ if (LastFieldBitfield) {
+ // Last field was a bitfield. Must update skip info.
+ Expr *BitWidth = LastFieldBitfield->getBitWidth();
+ uint64_t BitFieldSize =
+ BitWidth->EvaluateAsInt(CGM.getContext()).getZExtValue();
+ GC_IVAR skivar;
+ skivar.ivar_bytepos = BytePos + LastBitfieldOffset;
+ skivar.ivar_size = (BitFieldSize / ByteSizeInBits)
+ + ((BitFieldSize % ByteSizeInBits) != 0);
+ SkipIvars.push_back(skivar);
+ }
+
+ if (MaxField)
+ IvarsInfo.push_back(GC_IVAR(BytePos + MaxFieldOffset,
+ MaxUnionIvarSize));
+ if (MaxSkippedField)
+ SkipIvars.push_back(GC_IVAR(BytePos + MaxSkippedFieldOffset,
+ MaxSkippedUnionIvarSize));
+}
+
+/// BuildIvarLayoutBitmap - This routine is the workhorse for computing the
+/// layout bitmap (for ivars or blocks) and returning it in the given BitMap
+/// string container. It reads two containers, IvarsInfo and SkipIvars, which
+/// are assumed to have been filled in already by the caller.
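+///
+/// Worked example (illustrative, assuming 8-byte words): a single IvarsInfo
+/// entry at byte position 16 with ivar_size 2 becomes the pair
+/// (skip 2, scan 2), which is emitted as the byte 0x22 followed by the 0x00
+/// terminator; a scan run longer than 15 words would spill into extra 0x0f
+/// bytes, and a pure skip run into extra 0xf0 bytes.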
+llvm::Constant *CGObjCCommonMac::BuildIvarLayoutBitmap(std::string& BitMap) {
+ unsigned int WordsToScan, WordsToSkip;
+ const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+
+ // Build the string of skip/scan nibbles
+ llvm::SmallVector<SKIP_SCAN, 32> SkipScanIvars;
+ unsigned int WordSize =
+ CGM.getTypes().getTargetData().getTypeAllocSize(PtrTy);
+ if (IvarsInfo[0].ivar_bytepos == 0) {
+ WordsToSkip = 0;
+ WordsToScan = IvarsInfo[0].ivar_size;
+ } else {
+ WordsToSkip = IvarsInfo[0].ivar_bytepos/WordSize;
+ WordsToScan = IvarsInfo[0].ivar_size;
+ }
+ for (unsigned int i=1, Last=IvarsInfo.size(); i != Last; i++) {
+ unsigned int TailPrevGCObjC =
+ IvarsInfo[i-1].ivar_bytepos + IvarsInfo[i-1].ivar_size * WordSize;
+ if (IvarsInfo[i].ivar_bytepos == TailPrevGCObjC) {
+ // consecutive 'scanned' object pointers.
+ WordsToScan += IvarsInfo[i].ivar_size;
+ } else {
+      // Skip over GC-able object pointers which overlap each other.
+ if (TailPrevGCObjC > IvarsInfo[i].ivar_bytepos)
+ continue;
+ // Must skip over 1 or more words. We save current skip/scan values
+ // and start a new pair.
+ SKIP_SCAN SkScan;
+ SkScan.skip = WordsToSkip;
+ SkScan.scan = WordsToScan;
+ SkipScanIvars.push_back(SkScan);
+
+ // Skip the hole.
+ SkScan.skip = (IvarsInfo[i].ivar_bytepos - TailPrevGCObjC) / WordSize;
+ SkScan.scan = 0;
+ SkipScanIvars.push_back(SkScan);
+ WordsToSkip = 0;
+ WordsToScan = IvarsInfo[i].ivar_size;
+ }
+ }
+ if (WordsToScan > 0) {
+ SKIP_SCAN SkScan;
+ SkScan.skip = WordsToSkip;
+ SkScan.scan = WordsToScan;
+ SkipScanIvars.push_back(SkScan);
+ }
+
+ if (!SkipIvars.empty()) {
+ unsigned int LastIndex = SkipIvars.size()-1;
+ int LastByteSkipped =
+ SkipIvars[LastIndex].ivar_bytepos + SkipIvars[LastIndex].ivar_size;
+ LastIndex = IvarsInfo.size()-1;
+ int LastByteScanned =
+ IvarsInfo[LastIndex].ivar_bytepos +
+ IvarsInfo[LastIndex].ivar_size * WordSize;
+ // Compute number of bytes to skip at the tail end of the last ivar scanned.
+ if (LastByteSkipped > LastByteScanned) {
+ unsigned int TotalWords = (LastByteSkipped + (WordSize -1)) / WordSize;
+ SKIP_SCAN SkScan;
+ SkScan.skip = TotalWords - (LastByteScanned/WordSize);
+ SkScan.scan = 0;
+ SkipScanIvars.push_back(SkScan);
+ }
+ }
+  // Mini optimization of nibbles: a 0xM0 byte followed by a 0x0N byte is
+  // merged into the single byte 0xMN.
+ int SkipScan = SkipScanIvars.size()-1;
+ for (int i = 0; i <= SkipScan; i++) {
+ if ((i < SkipScan) && SkipScanIvars[i].skip && SkipScanIvars[i].scan == 0
+ && SkipScanIvars[i+1].skip == 0 && SkipScanIvars[i+1].scan) {
+ // 0xM0 followed by 0x0N detected.
+ SkipScanIvars[i].scan = SkipScanIvars[i+1].scan;
+ for (int j = i+1; j < SkipScan; j++)
+ SkipScanIvars[j] = SkipScanIvars[j+1];
+ --SkipScan;
+ }
+ }
+
+ // Generate the string.
+ for (int i = 0; i <= SkipScan; i++) {
+ unsigned char byte;
+ unsigned int skip_small = SkipScanIvars[i].skip % 0xf;
+ unsigned int scan_small = SkipScanIvars[i].scan % 0xf;
+ unsigned int skip_big = SkipScanIvars[i].skip / 0xf;
+ unsigned int scan_big = SkipScanIvars[i].scan / 0xf;
+
+ // first skip big.
+ for (unsigned int ix = 0; ix < skip_big; ix++)
+ BitMap += (unsigned char)(0xf0);
+
+ // next (skip small, scan)
+ if (skip_small) {
+ byte = skip_small << 4;
+ if (scan_big > 0) {
+ byte |= 0xf;
+ --scan_big;
+ } else if (scan_small) {
+ byte |= scan_small;
+ scan_small = 0;
+ }
+ BitMap += byte;
+ }
+ // next scan big
+ for (unsigned int ix = 0; ix < scan_big; ix++)
+ BitMap += (unsigned char)(0x0f);
+ // last scan small
+ if (scan_small) {
+ byte = scan_small;
+ BitMap += byte;
+ }
+ }
+ // null terminate string.
+ unsigned char zero = 0;
+ BitMap += zero;
+
+ llvm::GlobalVariable * Entry =
+ CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
+ llvm::ConstantArray::get(VMContext, BitMap.c_str()),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+/// BuildIvarLayout - Builds the ivar layout bitmap for the class
+/// implementation for the __strong or __weak case.
+/// The layout map describes which words in the ivar list must be skipped
+/// and which must be scanned by the GC (see below). The string is built of
+/// bytes. Each byte is divided into two nibbles (4 bits each): the left
+/// nibble is the count of words to skip and the right nibble is the count
+/// of words to scan. Each nibble can therefore represent up to 15 words to
+/// skip or scan. Skipping everything that remains is represented by a 0x00
+/// byte, which also terminates the string.
+/// 1. When ForStrongLayout is true, the following ivars are scanned:
+/// - id, Class
+/// - object *
+/// - __strong anything
+///
+/// 2. When ForStrongLayout is false, the following ivars are scanned:
+/// - __weak anything
+///
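+/// For example (illustrative, not from the original comment): given the
+/// ivars "id a; __weak id b; int c;", the __strong layout marks only 'a'
+/// for scanning ('b' and 'c' land in the skip list), while the __weak
+/// layout marks only 'b'. If nothing needs scanning, a null pointer is
+/// returned instead of an emitted layout string.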
+llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
+ const ObjCImplementationDecl *OMD,
+ bool ForStrongLayout) {
+ bool hasUnion = false;
+
+ const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC)
+ return llvm::Constant::getNullValue(PtrTy);
+
+ llvm::SmallVector<ObjCIvarDecl*, 32> Ivars;
+ const ObjCInterfaceDecl *OI = OMD->getClassInterface();
+ CGM.getContext().DeepCollectObjCIvars(OI, true, Ivars);
+
+ llvm::SmallVector<FieldDecl*, 32> RecFields;
+ for (unsigned k = 0, e = Ivars.size(); k != e; ++k)
+ RecFields.push_back(cast<FieldDecl>(Ivars[k]));
+
+ if (RecFields.empty())
+ return llvm::Constant::getNullValue(PtrTy);
+
+ SkipIvars.clear();
+ IvarsInfo.clear();
+
+ BuildAggrIvarLayout(OMD, 0, 0, RecFields, 0, ForStrongLayout, hasUnion);
+ if (IvarsInfo.empty())
+ return llvm::Constant::getNullValue(PtrTy);
+  // Sort on byte position in case we encountered a union nested in
+  // the ivar list.
+ if (hasUnion && !IvarsInfo.empty())
+ std::sort(IvarsInfo.begin(), IvarsInfo.end());
+ if (hasUnion && !SkipIvars.empty())
+ std::sort(SkipIvars.begin(), SkipIvars.end());
+
+ std::string BitMap;
+ llvm::Constant *C = BuildIvarLayoutBitmap(BitMap);
+
+ if (CGM.getLangOptions().ObjCGCBitmapPrint) {
+ printf("\n%s ivar layout for class '%s': ",
+ ForStrongLayout ? "strong" : "weak",
+ OMD->getClassInterface()->getName().data());
+ const unsigned char *s = (unsigned char*)BitMap.c_str();
+ for (unsigned i = 0; i < BitMap.size(); i++)
+ if (!(s[i] & 0xf0))
+ printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
+ else
+ printf("0x%x%s", s[i], s[i] != 0 ? ", " : "");
+ printf("\n");
+ }
+ return C;
+}
+
+llvm::Constant *CGObjCCommonMac::GetMethodVarName(Selector Sel) {
+ llvm::GlobalVariable *&Entry = MethodVarNames[Sel];
+
+ // FIXME: Avoid std::string copying.
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_NAME_",
+ llvm::ConstantArray::get(VMContext, Sel.getAsString()),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+llvm::Constant *CGObjCCommonMac::GetMethodVarName(IdentifierInfo *ID) {
+ return GetMethodVarName(CGM.getContext().Selectors.getNullarySelector(ID));
+}
+
+llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) {
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForType(Field->getType(), TypeStr, Field);
+
+ llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
+ llvm::ConstantArray::get(VMContext, TypeStr),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D) {
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForMethodDecl(const_cast<ObjCMethodDecl*>(D),
+ TypeStr);
+
+ llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
+ llvm::ConstantArray::get(VMContext, TypeStr),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+llvm::Constant *CGObjCCommonMac::GetPropertyName(IdentifierInfo *Ident) {
+ llvm::GlobalVariable *&Entry = PropertyNames[Ident];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_PROP_NAME_ATTR_",
+ llvm::ConstantArray::get(VMContext,
+ Ident->getNameStart()),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+// FIXME: This Decl should be more precise.
+llvm::Constant *
+CGObjCCommonMac::GetPropertyTypeString(const ObjCPropertyDecl *PD,
+ const Decl *Container) {
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container, TypeStr);
+ return GetPropertyName(&CGM.getContext().Idents.get(TypeStr));
+}
+
+void CGObjCCommonMac::GetNameForMethod(const ObjCMethodDecl *D,
+ const ObjCContainerDecl *CD,
+ llvm::SmallVectorImpl<char> &Name) {
+ llvm::raw_svector_ostream OS(Name);
+ assert (CD && "Missing container decl in GetNameForMethod");
+ OS << '\01' << (D->isInstanceMethod() ? '-' : '+')
+ << '[' << CD->getName();
+ if (const ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(D->getDeclContext()))
+    OS << '(' << CID->getName() << ')';
+ OS << ' ' << D->getSelector().getAsString() << ']';
+}
+
+void CGObjCMac::FinishModule() {
+ EmitModuleInfo();
+
+ // Emit the dummy bodies for any protocols which were referenced but
+ // never defined.
+ for (llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*>::iterator
+ I = Protocols.begin(), e = Protocols.end(); I != e; ++I) {
+ if (I->second->hasInitializer())
+ continue;
+
+ std::vector<llvm::Constant*> Values(5);
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
+ Values[1] = GetClassName(I->first);
+ Values[2] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ Values[3] = Values[4] =
+ llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
+ I->second->setLinkage(llvm::GlobalValue::InternalLinkage);
+ I->second->setInitializer(llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
+ Values));
+ CGM.AddUsedGlobal(I->second);
+ }
+
+ // Add assembler directives to add lazy undefined symbol references
+ // for classes which are referenced but not defined. This is
+ // important for correct linker interaction.
+ //
+ // FIXME: It would be nice if we had an LLVM construct for this.
+ if (!LazySymbols.empty() || !DefinedSymbols.empty()) {
+ llvm::SmallString<256> Asm;
+ Asm += CGM.getModule().getModuleInlineAsm();
+ if (!Asm.empty() && Asm.back() != '\n')
+ Asm += '\n';
+
+ llvm::raw_svector_ostream OS(Asm);
+ for (llvm::SetVector<IdentifierInfo*>::iterator I = DefinedSymbols.begin(),
+ e = DefinedSymbols.end(); I != e; ++I)
+ OS << "\t.objc_class_name_" << (*I)->getName() << "=0\n"
+ << "\t.globl .objc_class_name_" << (*I)->getName() << "\n";
+ for (llvm::SetVector<IdentifierInfo*>::iterator I = LazySymbols.begin(),
+ e = LazySymbols.end(); I != e; ++I) {
+ OS << "\t.lazy_reference .objc_class_name_" << (*I)->getName() << "\n";
+ }
+
+ for (size_t i = 0; i < DefinedCategoryNames.size(); ++i) {
+ OS << "\t.objc_category_name_" << DefinedCategoryNames[i] << "=0\n"
+ << "\t.globl .objc_category_name_" << DefinedCategoryNames[i] << "\n";
+ }
+
+ CGM.getModule().setModuleInlineAsm(OS.str());
+ }
+}
+
+CGObjCNonFragileABIMac::CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm)
+ : CGObjCCommonMac(cgm),
+ ObjCTypes(cgm) {
+ ObjCEmptyCacheVar = ObjCEmptyVtableVar = NULL;
+ ObjCABI = 2;
+}
+
+/* *** */
+
+ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
+ : VMContext(cgm.getLLVMContext()), CGM(cgm) {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+
+ ShortTy = Types.ConvertType(Ctx.ShortTy);
+ IntTy = Types.ConvertType(Ctx.IntTy);
+ LongTy = Types.ConvertType(Ctx.LongTy);
+ LongLongTy = Types.ConvertType(Ctx.LongLongTy);
+ Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+
+ ObjectPtrTy = Types.ConvertType(Ctx.getObjCIdType());
+ PtrObjectPtrTy = llvm::PointerType::getUnqual(ObjectPtrTy);
+ SelectorPtrTy = Types.ConvertType(Ctx.getObjCSelType());
+
+ // FIXME: It would be nice to unify this with the opaque type, so that the IR
+ // comes out a bit cleaner.
+ const llvm::Type *T = Types.ConvertType(Ctx.getObjCProtoType());
+ ExternalProtocolPtrTy = llvm::PointerType::getUnqual(T);
+
+ // I'm not sure I like this. The implicit coordination is a bit
+ // gross. We should solve this in a reasonable fashion because this
+ // is a pretty common task (match some runtime data structure with
+ // an LLVM data structure).
+
+ // FIXME: This is leaked.
+ // FIXME: Merge with rewriter code?
+
+ // struct _objc_super {
+ // id self;
+ // Class cls;
+ // }
+ RecordDecl *RD = RecordDecl::Create(Ctx, TTK_Struct,
+ Ctx.getTranslationUnitDecl(),
+ SourceLocation(),
+ &Ctx.Idents.get("_objc_super"));
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+ Ctx.getObjCIdType(), 0, 0, false));
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+ Ctx.getObjCClassType(), 0, 0, false));
+ RD->completeDefinition();
+
+ SuperCTy = Ctx.getTagDeclType(RD);
+ SuperPtrCTy = Ctx.getPointerType(SuperCTy);
+
+ SuperTy = cast<llvm::StructType>(Types.ConvertType(SuperCTy));
+ SuperPtrTy = llvm::PointerType::getUnqual(SuperTy);
+
+ // struct _prop_t {
+ // char *name;
+ // char *attributes;
+ // }
+ PropertyTy = llvm::StructType::get(VMContext, Int8PtrTy, Int8PtrTy, NULL);
+ CGM.getModule().addTypeName("struct._prop_t",
+ PropertyTy);
+
+ // struct _prop_list_t {
+ // uint32_t entsize; // sizeof(struct _prop_t)
+ // uint32_t count_of_properties;
+ // struct _prop_t prop_list[count_of_properties];
+ // }
+ PropertyListTy = llvm::StructType::get(VMContext, IntTy,
+ IntTy,
+ llvm::ArrayType::get(PropertyTy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct._prop_list_t",
+ PropertyListTy);
+ // struct _prop_list_t *
+ PropertyListPtrTy = llvm::PointerType::getUnqual(PropertyListTy);
+
+ // struct _objc_method {
+ // SEL _cmd;
+ // char *method_type;
+ // char *_imp;
+ // }
+ MethodTy = llvm::StructType::get(VMContext, SelectorPtrTy,
+ Int8PtrTy,
+ Int8PtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_method", MethodTy);
+
+ // struct _objc_cache *
+ CacheTy = llvm::OpaqueType::get(VMContext);
+ CGM.getModule().addTypeName("struct._objc_cache", CacheTy);
+ CachePtrTy = llvm::PointerType::getUnqual(CacheTy);
+}
+
+ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
+ : ObjCCommonTypesHelper(cgm) {
+ // struct _objc_method_description {
+ // SEL name;
+ // char *types;
+ // }
+ MethodDescriptionTy =
+ llvm::StructType::get(VMContext, SelectorPtrTy,
+ Int8PtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_method_description",
+ MethodDescriptionTy);
+
+ // struct _objc_method_description_list {
+ // int count;
+ // struct _objc_method_description[1];
+ // }
+ MethodDescriptionListTy =
+ llvm::StructType::get(VMContext, IntTy,
+ llvm::ArrayType::get(MethodDescriptionTy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_method_description_list",
+ MethodDescriptionListTy);
+
+ // struct _objc_method_description_list *
+ MethodDescriptionListPtrTy =
+ llvm::PointerType::getUnqual(MethodDescriptionListTy);
+
+ // Protocol description structures
+
+ // struct _objc_protocol_extension {
+ // uint32_t size; // sizeof(struct _objc_protocol_extension)
+ // struct _objc_method_description_list *optional_instance_methods;
+ // struct _objc_method_description_list *optional_class_methods;
+ // struct _objc_property_list *instance_properties;
+ // }
+ ProtocolExtensionTy =
+ llvm::StructType::get(VMContext, IntTy,
+ MethodDescriptionListPtrTy,
+ MethodDescriptionListPtrTy,
+ PropertyListPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_protocol_extension",
+ ProtocolExtensionTy);
+
+ // struct _objc_protocol_extension *
+ ProtocolExtensionPtrTy = llvm::PointerType::getUnqual(ProtocolExtensionTy);
+
+ // Handle recursive construction of Protocol and ProtocolList types
+
+ llvm::PATypeHolder ProtocolTyHolder = llvm::OpaqueType::get(VMContext);
+ llvm::PATypeHolder ProtocolListTyHolder = llvm::OpaqueType::get(VMContext);
+
+ const llvm::Type *T =
+ llvm::StructType::get(VMContext,
+ llvm::PointerType::getUnqual(ProtocolListTyHolder),
+ LongTy,
+ llvm::ArrayType::get(ProtocolTyHolder, 0),
+ NULL);
+ cast<llvm::OpaqueType>(ProtocolListTyHolder.get())->refineAbstractTypeTo(T);
+
+ // struct _objc_protocol {
+ // struct _objc_protocol_extension *isa;
+ // char *protocol_name;
+ // struct _objc_protocol **_objc_protocol_list;
+ // struct _objc_method_description_list *instance_methods;
+ // struct _objc_method_description_list *class_methods;
+ // }
+ T = llvm::StructType::get(VMContext, ProtocolExtensionPtrTy,
+ Int8PtrTy,
+ llvm::PointerType::getUnqual(ProtocolListTyHolder),
+ MethodDescriptionListPtrTy,
+ MethodDescriptionListPtrTy,
+ NULL);
+ cast<llvm::OpaqueType>(ProtocolTyHolder.get())->refineAbstractTypeTo(T);
+
+ ProtocolListTy = cast<llvm::StructType>(ProtocolListTyHolder.get());
+ CGM.getModule().addTypeName("struct._objc_protocol_list",
+ ProtocolListTy);
+ // struct _objc_protocol_list *
+ ProtocolListPtrTy = llvm::PointerType::getUnqual(ProtocolListTy);
+
+ ProtocolTy = cast<llvm::StructType>(ProtocolTyHolder.get());
+ CGM.getModule().addTypeName("struct._objc_protocol", ProtocolTy);
+ ProtocolPtrTy = llvm::PointerType::getUnqual(ProtocolTy);
+
+ // Class description structures
+
+ // struct _objc_ivar {
+ // char *ivar_name;
+ // char *ivar_type;
+ // int ivar_offset;
+ // }
+ IvarTy = llvm::StructType::get(VMContext, Int8PtrTy,
+ Int8PtrTy,
+ IntTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_ivar", IvarTy);
+
+ // struct _objc_ivar_list *
+ IvarListTy = llvm::OpaqueType::get(VMContext);
+ CGM.getModule().addTypeName("struct._objc_ivar_list", IvarListTy);
+ IvarListPtrTy = llvm::PointerType::getUnqual(IvarListTy);
+
+ // struct _objc_method_list *
+ MethodListTy = llvm::OpaqueType::get(VMContext);
+ CGM.getModule().addTypeName("struct._objc_method_list", MethodListTy);
+ MethodListPtrTy = llvm::PointerType::getUnqual(MethodListTy);
+
+ // struct _objc_class_extension *
+ ClassExtensionTy =
+ llvm::StructType::get(VMContext, IntTy,
+ Int8PtrTy,
+ PropertyListPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_class_extension", ClassExtensionTy);
+ ClassExtensionPtrTy = llvm::PointerType::getUnqual(ClassExtensionTy);
+
+ llvm::PATypeHolder ClassTyHolder = llvm::OpaqueType::get(VMContext);
+
+ // struct _objc_class {
+ // Class isa;
+ // Class super_class;
+ // char *name;
+ // long version;
+ // long info;
+ // long instance_size;
+ // struct _objc_ivar_list *ivars;
+ // struct _objc_method_list *methods;
+ // struct _objc_cache *cache;
+ // struct _objc_protocol_list *protocols;
+ // char *ivar_layout;
+ // struct _objc_class_ext *ext;
+ // };
+ T = llvm::StructType::get(VMContext,
+ llvm::PointerType::getUnqual(ClassTyHolder),
+ llvm::PointerType::getUnqual(ClassTyHolder),
+ Int8PtrTy,
+ LongTy,
+ LongTy,
+ LongTy,
+ IvarListPtrTy,
+ MethodListPtrTy,
+ CachePtrTy,
+ ProtocolListPtrTy,
+ Int8PtrTy,
+ ClassExtensionPtrTy,
+ NULL);
+ cast<llvm::OpaqueType>(ClassTyHolder.get())->refineAbstractTypeTo(T);
+
+ ClassTy = cast<llvm::StructType>(ClassTyHolder.get());
+ CGM.getModule().addTypeName("struct._objc_class", ClassTy);
+ ClassPtrTy = llvm::PointerType::getUnqual(ClassTy);
+
+ // struct _objc_category {
+ // char *category_name;
+ // char *class_name;
+ // struct _objc_method_list *instance_methods;
+ // struct _objc_method_list *class_methods;
+ // struct _objc_protocol_list *protocols;
+ // uint32_t size; // sizeof(struct _objc_category)
+ // struct _objc_property_list *instance_properties;// category's @property
+ // }
+ CategoryTy = llvm::StructType::get(VMContext, Int8PtrTy,
+ Int8PtrTy,
+ MethodListPtrTy,
+ MethodListPtrTy,
+ ProtocolListPtrTy,
+ IntTy,
+ PropertyListPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_category", CategoryTy);
+
+ // Global metadata structures
+
+ // struct _objc_symtab {
+ // long sel_ref_cnt;
+ // SEL *refs;
+ // short cls_def_cnt;
+ // short cat_def_cnt;
+ // char *defs[cls_def_cnt + cat_def_cnt];
+ // }
+ SymtabTy = llvm::StructType::get(VMContext, LongTy,
+ SelectorPtrTy,
+ ShortTy,
+ ShortTy,
+ llvm::ArrayType::get(Int8PtrTy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_symtab", SymtabTy);
+ SymtabPtrTy = llvm::PointerType::getUnqual(SymtabTy);
+
+ // struct _objc_module {
+ // long version;
+ // long size; // sizeof(struct _objc_module)
+ // char *name;
+ // struct _objc_symtab* symtab;
+ // }
+ ModuleTy =
+ llvm::StructType::get(VMContext, LongTy,
+ LongTy,
+ Int8PtrTy,
+ SymtabPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_module", ModuleTy);
+
+
+ // FIXME: This is the size of the setjmp buffer and should be target
+ // specific. 18 is what's used on 32-bit X86.
+ uint64_t SetJmpBufferSize = 18;
+
+ // Exceptions
+ const llvm::Type *StackPtrTy = llvm::ArrayType::get(
+ llvm::Type::getInt8PtrTy(VMContext), 4);
+
+ ExceptionDataTy =
+ llvm::StructType::get(VMContext,
+ llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext),
+ SetJmpBufferSize),
+ StackPtrTy, NULL);
+ CGM.getModule().addTypeName("struct._objc_exception_data",
+ ExceptionDataTy);
+
+}
+
+ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm)
+ : ObjCCommonTypesHelper(cgm) {
+ // struct _method_list_t {
+ // uint32_t entsize; // sizeof(struct _objc_method)
+ // uint32_t method_count;
+ // struct _objc_method method_list[method_count];
+ // }
+ MethodListnfABITy = llvm::StructType::get(VMContext, IntTy,
+ IntTy,
+ llvm::ArrayType::get(MethodTy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct.__method_list_t",
+ MethodListnfABITy);
+ // struct method_list_t *
+ MethodListnfABIPtrTy = llvm::PointerType::getUnqual(MethodListnfABITy);
+
+ // struct _protocol_t {
+ // id isa; // NULL
+ // const char * const protocol_name;
+ // const struct _protocol_list_t * protocol_list; // super protocols
+ // const struct method_list_t * const instance_methods;
+ // const struct method_list_t * const class_methods;
+ // const struct method_list_t *optionalInstanceMethods;
+ // const struct method_list_t *optionalClassMethods;
+ // const struct _prop_list_t * properties;
+ // const uint32_t size; // sizeof(struct _protocol_t)
+ // const uint32_t flags; // = 0
+ // }
+
+ // Holder for struct _protocol_list_t *
+ llvm::PATypeHolder ProtocolListTyHolder = llvm::OpaqueType::get(VMContext);
+
+ ProtocolnfABITy = llvm::StructType::get(VMContext, ObjectPtrTy,
+ Int8PtrTy,
+ llvm::PointerType::getUnqual(
+ ProtocolListTyHolder),
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ PropertyListPtrTy,
+ IntTy,
+ IntTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._protocol_t",
+ ProtocolnfABITy);
+
+ // struct _protocol_t*
+ ProtocolnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolnfABITy);
+
+ // struct _protocol_list_t {
+ // long protocol_count; // Note, this is 32/64 bit
+ // struct _protocol_t *[protocol_count];
+ // }
+ ProtocolListnfABITy = llvm::StructType::get(VMContext, LongTy,
+ llvm::ArrayType::get(
+ ProtocolnfABIPtrTy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_protocol_list",
+ ProtocolListnfABITy);
+ cast<llvm::OpaqueType>(ProtocolListTyHolder.get())->refineAbstractTypeTo(
+ ProtocolListnfABITy);
+
+ // struct _objc_protocol_list*
+ ProtocolListnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolListnfABITy);
+
+ // struct _ivar_t {
+ // unsigned long int *offset; // pointer to ivar offset location
+ // char *name;
+ // char *type;
+ // uint32_t alignment;
+ // uint32_t size;
+ // }
+ IvarnfABITy = llvm::StructType::get(VMContext,
+ llvm::PointerType::getUnqual(LongTy),
+ Int8PtrTy,
+ Int8PtrTy,
+ IntTy,
+ IntTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._ivar_t", IvarnfABITy);
+
+ // struct _ivar_list_t {
+ // uint32 entsize; // sizeof(struct _ivar_t)
+ // uint32 count;
+ // struct _ivar_t list[count];
+ // }
+ IvarListnfABITy = llvm::StructType::get(VMContext, IntTy,
+ IntTy,
+ llvm::ArrayType::get(
+ IvarnfABITy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct._ivar_list_t", IvarListnfABITy);
+
+ IvarListnfABIPtrTy = llvm::PointerType::getUnqual(IvarListnfABITy);
+
+ // struct _class_ro_t {
+ // uint32_t const flags;
+ // uint32_t const instanceStart;
+ // uint32_t const instanceSize;
+ // uint32_t const reserved; // only when building for 64bit targets
+ // const uint8_t * const ivarLayout;
+ // const char *const name;
+ // const struct _method_list_t * const baseMethods;
+ // const struct _objc_protocol_list *const baseProtocols;
+ // const struct _ivar_list_t *const ivars;
+ // const uint8_t * const weakIvarLayout;
+ // const struct _prop_list_t * const properties;
+ // }
+
+ // FIXME. Add 'reserved' field in 64bit abi mode!
+ ClassRonfABITy = llvm::StructType::get(VMContext, IntTy,
+ IntTy,
+ IntTy,
+ Int8PtrTy,
+ Int8PtrTy,
+ MethodListnfABIPtrTy,
+ ProtocolListnfABIPtrTy,
+ IvarListnfABIPtrTy,
+ Int8PtrTy,
+ PropertyListPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._class_ro_t",
+ ClassRonfABITy);
+
+ // ImpnfABITy - LLVM for id (*)(id, SEL, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(SelectorPtrTy);
+ ImpnfABITy = llvm::PointerType::getUnqual(
+ llvm::FunctionType::get(ObjectPtrTy, Params, false));
+
+ // struct _class_t {
+ // struct _class_t *isa;
+ // struct _class_t * const superclass;
+ // void *cache;
+ // IMP *vtable;
+ // struct class_ro_t *ro;
+ // }
+
+ llvm::PATypeHolder ClassTyHolder = llvm::OpaqueType::get(VMContext);
+ ClassnfABITy =
+ llvm::StructType::get(VMContext,
+ llvm::PointerType::getUnqual(ClassTyHolder),
+ llvm::PointerType::getUnqual(ClassTyHolder),
+ CachePtrTy,
+ llvm::PointerType::getUnqual(ImpnfABITy),
+ llvm::PointerType::getUnqual(ClassRonfABITy),
+ NULL);
+ CGM.getModule().addTypeName("struct._class_t", ClassnfABITy);
+
+ cast<llvm::OpaqueType>(ClassTyHolder.get())->refineAbstractTypeTo(
+ ClassnfABITy);
+
+ // LLVM for struct _class_t *
+ ClassnfABIPtrTy = llvm::PointerType::getUnqual(ClassnfABITy);
+
+ // struct _category_t {
+ // const char * const name;
+ // struct _class_t *const cls;
+ // const struct _method_list_t * const instance_methods;
+ // const struct _method_list_t * const class_methods;
+ // const struct _protocol_list_t * const protocols;
+ // const struct _prop_list_t * const properties;
+ // }
+ CategorynfABITy = llvm::StructType::get(VMContext, Int8PtrTy,
+ ClassnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ ProtocolListnfABIPtrTy,
+ PropertyListPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._category_t", CategorynfABITy);
+
+ // New types for nonfragile abi messaging.
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+
+ // MessageRefTy - LLVM for:
+ // struct _message_ref_t {
+ // IMP messenger;
+ // SEL name;
+ // };
+
+ // First the clang type for struct _message_ref_t
+ RecordDecl *RD = RecordDecl::Create(Ctx, TTK_Struct,
+ Ctx.getTranslationUnitDecl(),
+ SourceLocation(),
+ &Ctx.Idents.get("_message_ref_t"));
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+ Ctx.VoidPtrTy, 0, 0, false));
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+ Ctx.getObjCSelType(), 0, 0, false));
+ RD->completeDefinition();
+
+ MessageRefCTy = Ctx.getTagDeclType(RD);
+ MessageRefCPtrTy = Ctx.getPointerType(MessageRefCTy);
+ MessageRefTy = cast<llvm::StructType>(Types.ConvertType(MessageRefCTy));
+
+ // MessageRefPtrTy - LLVM for struct _message_ref_t*
+ MessageRefPtrTy = llvm::PointerType::getUnqual(MessageRefTy);
+
+ // SuperMessageRefTy - LLVM for:
+ // struct _super_message_ref_t {
+ // SUPER_IMP messenger;
+ // SEL name;
+ // };
+ SuperMessageRefTy = llvm::StructType::get(VMContext, ImpnfABITy,
+ SelectorPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._super_message_ref_t", SuperMessageRefTy);
+
+ // SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
+ SuperMessageRefPtrTy = llvm::PointerType::getUnqual(SuperMessageRefTy);
+
+
+ // struct objc_typeinfo {
+ // const void** vtable; // objc_ehtype_vtable + 2
+ // const char* name; // c++ typeinfo string
+ // Class cls;
+ // };
+ EHTypeTy = llvm::StructType::get(VMContext,
+ llvm::PointerType::getUnqual(Int8PtrTy),
+ Int8PtrTy,
+ ClassnfABIPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_typeinfo", EHTypeTy);
+ EHTypePtrTy = llvm::PointerType::getUnqual(EHTypeTy);
+}
+
+llvm::Function *CGObjCNonFragileABIMac::ModuleInitFunction() {
+ FinishNonFragileABIModule();
+
+ return NULL;
+}
+
+void CGObjCNonFragileABIMac::AddModuleClassList(const
+ std::vector<llvm::GlobalValue*>
+ &Container,
+ const char *SymbolName,
+ const char *SectionName) {
+ unsigned NumClasses = Container.size();
+
+ if (!NumClasses)
+ return;
+
+ std::vector<llvm::Constant*> Symbols(NumClasses);
+ for (unsigned i=0; i<NumClasses; i++)
+ Symbols[i] = llvm::ConstantExpr::getBitCast(Container[i],
+ ObjCTypes.Int8PtrTy);
+ llvm::Constant* Init =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
+ NumClasses),
+ Symbols);
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ SymbolName);
+ GV->setAlignment(CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ GV->setSection(SectionName);
+ CGM.AddUsedGlobal(GV);
+}
+
+void CGObjCNonFragileABIMac::FinishNonFragileABIModule() {
+ // nonfragile abi has no module definition.
+
+ // Build list of all implemented class addresses in array
+ // L_OBJC_LABEL_CLASS_$.
+ AddModuleClassList(DefinedClasses,
+ "\01L_OBJC_LABEL_CLASS_$",
+ "__DATA, __objc_classlist, regular, no_dead_strip");
+
+ for (unsigned i = 0; i < DefinedClasses.size(); i++) {
+ llvm::GlobalValue *IMPLGV = DefinedClasses[i];
+ if (IMPLGV->getLinkage() != llvm::GlobalValue::ExternalWeakLinkage)
+ continue;
+ IMPLGV->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ }
+
+ for (unsigned i = 0; i < DefinedMetaClasses.size(); i++) {
+ llvm::GlobalValue *IMPLGV = DefinedMetaClasses[i];
+ if (IMPLGV->getLinkage() != llvm::GlobalValue::ExternalWeakLinkage)
+ continue;
+ IMPLGV->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ }
+
+ AddModuleClassList(DefinedNonLazyClasses,
+ "\01L_OBJC_LABEL_NONLAZY_CLASS_$",
+ "__DATA, __objc_nlclslist, regular, no_dead_strip");
+
+ // Build list of all implemented category addresses in array
+ // L_OBJC_LABEL_CATEGORY_$.
+ AddModuleClassList(DefinedCategories,
+ "\01L_OBJC_LABEL_CATEGORY_$",
+ "__DATA, __objc_catlist, regular, no_dead_strip");
+ AddModuleClassList(DefinedNonLazyCategories,
+ "\01L_OBJC_LABEL_NONLAZY_CATEGORY_$",
+ "__DATA, __objc_nlcatlist, regular, no_dead_strip");
+
+ EmitImageInfo();
+}
+
+/// LegacyDispatchedSelector - Returns true if Sel is not in the list of
+/// NonLegacyDispatchMethods; false otherwise. In other words, except for
+/// the 19 selectors in that list, we generate a 32bit-style message
+/// dispatch call for every selector.
+///
+bool CGObjCNonFragileABIMac::LegacyDispatchedSelector(Selector Sel) {
+ switch (CGM.getCodeGenOpts().getObjCDispatchMethod()) {
+ default:
+ assert(0 && "Invalid dispatch method!");
+ case CodeGenOptions::Legacy:
+ return true;
+ case CodeGenOptions::NonLegacy:
+ return false;
+ case CodeGenOptions::Mixed:
+ break;
+ }
+
+ // In Mixed mode, check whether this selector is in the white-list of
+ // selectors which must use the new dispatch convention. We lazily build
+ // a dense set for this.
+ if (NonLegacyDispatchMethods.empty()) {
+ NonLegacyDispatchMethods.insert(GetNullarySelector("alloc"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("class"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("self"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("isFlipped"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("length"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("count"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("retain"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("release"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("autorelease"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("hash"));
+
+ NonLegacyDispatchMethods.insert(GetUnarySelector("allocWithZone"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("isKindOfClass"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("respondsToSelector"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("objectForKey"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("objectAtIndex"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("isEqualToString"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("isEqual"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("addObject"));
+ // "countByEnumeratingWithState:objects:count"
+ IdentifierInfo *KeyIdents[] = {
+ &CGM.getContext().Idents.get("countByEnumeratingWithState"),
+ &CGM.getContext().Idents.get("objects"),
+ &CGM.getContext().Idents.get("count")
+ };
+ NonLegacyDispatchMethods.insert(
+ CGM.getContext().Selectors.getSelector(3, KeyIdents));
+ }
+
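+ // In Mixed mode, then, a selector from the list above (e.g. retain or
+ // objectForKey:) gets the new fixup-based dispatch, while every other
+ // selector falls back to the 32bit-style objc_msgSend path.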
+ return (NonLegacyDispatchMethods.count(Sel) == 0);
+}
+
+// Metadata flags
+enum MetaDataFlags {
+ CLS = 0x0,
+ CLS_META = 0x1,
+ CLS_ROOT = 0x2,
+ OBJC2_CLS_HIDDEN = 0x10,
+ CLS_EXCEPTION = 0x20
+};
+/// BuildClassRoTInitializer - generate meta-data for:
+/// struct _class_ro_t {
+/// uint32_t const flags;
+/// uint32_t const instanceStart;
+/// uint32_t const instanceSize;
+/// uint32_t const reserved; // only when building for 64bit targets
+/// const uint8_t * const ivarLayout;
+/// const char *const name;
+/// const struct _method_list_t * const baseMethods;
+/// const struct _protocol_list_t *const baseProtocols;
+/// const struct _ivar_list_t *const ivars;
+/// const uint8_t * const weakIvarLayout;
+/// const struct _prop_list_t * const properties;
+/// }
+///
+llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
+ unsigned flags,
+ unsigned InstanceStart,
+ unsigned InstanceSize,
+ const ObjCImplementationDecl *ID) {
+ std::string ClassName = ID->getNameAsString();
+ std::vector<llvm::Constant*> Values(10); // 11 for 64bit targets!
+ Values[ 0] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
+ Values[ 1] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceStart);
+ Values[ 2] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceSize);
+ // FIXME. For 64bit targets add 0 here.
+ Values[ 3] = (flags & CLS_META) ? GetIvarLayoutName(0, ObjCTypes)
+ : BuildIvarLayout(ID, true);
+ Values[ 4] = GetClassName(ID->getIdentifier());
+ // const struct _method_list_t * const baseMethods;
+ std::vector<llvm::Constant*> Methods;
+ std::string MethodListName("\01l_OBJC_$_");
+ if (flags & CLS_META) {
+ MethodListName += "CLASS_METHODS_" + ID->getNameAsString();
+ for (ObjCImplementationDecl::classmeth_iterator
+ i = ID->classmeth_begin(), e = ID->classmeth_end(); i != e; ++i) {
+ // Class methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+ } else {
+ MethodListName += "INSTANCE_METHODS_" + ID->getNameAsString();
+ for (ObjCImplementationDecl::instmeth_iterator
+ i = ID->instmeth_begin(), e = ID->instmeth_end(); i != e; ++i) {
+ // Instance methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+ for (ObjCImplementationDecl::propimpl_iterator
+ i = ID->propimpl_begin(), e = ID->propimpl_end(); i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize){
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+ if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ Methods.push_back(C);
+ if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ Methods.push_back(C);
+ }
+ }
+ }
+ Values[ 5] = EmitMethodList(MethodListName,
+ "__DATA, __objc_const", Methods);
+
+ const ObjCInterfaceDecl *OID = ID->getClassInterface();
+ assert(OID && "CGObjCNonFragileABIMac::BuildClassRoTInitializer");
+ Values[ 6] = EmitProtocolList("\01l_OBJC_CLASS_PROTOCOLS_$_"
+ + OID->getName(),
+ OID->all_referenced_protocol_begin(),
+ OID->all_referenced_protocol_end());
+
+ if (flags & CLS_META)
+ Values[ 7] = llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
+ else
+ Values[ 7] = EmitIvarList(ID);
+ Values[ 8] = (flags & CLS_META) ? GetIvarLayoutName(0, ObjCTypes)
+ : BuildIvarLayout(ID, false);
+ if (flags & CLS_META)
+ Values[ 9] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ else
+ Values[ 9] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getName(),
+ ID, ID->getClassInterface(), ObjCTypes);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassRonfABITy,
+ Values);
+ llvm::GlobalVariable *CLASS_RO_GV =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassRonfABITy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ (flags & CLS_META) ?
+ std::string("\01l_OBJC_METACLASS_RO_$_")+ClassName :
+ std::string("\01l_OBJC_CLASS_RO_$_")+ClassName);
+ CLASS_RO_GV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.ClassRonfABITy));
+ CLASS_RO_GV->setSection("__DATA, __objc_const");
+ return CLASS_RO_GV;
+
+}
+
+/// BuildClassMetaData - This routine defines the top-level meta-data
+/// for the given ClassName:
+/// struct _class_t {
+/// struct _class_t *isa;
+/// struct _class_t * const superclass;
+/// void *cache;
+/// IMP *vtable;
+/// struct class_ro_t *ro;
+/// }
+///
+llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassMetaData(
+ std::string &ClassName,
+ llvm::Constant *IsAGV,
+ llvm::Constant *SuperClassGV,
+ llvm::Constant *ClassRoGV,
+ bool HiddenVisibility) {
+ std::vector<llvm::Constant*> Values(5);
+ Values[0] = IsAGV;
+ Values[1] = SuperClassGV;
+ if (!Values[1])
+ Values[1] = llvm::Constant::getNullValue(ObjCTypes.ClassnfABIPtrTy);
+ Values[2] = ObjCEmptyCacheVar; // &ObjCEmptyCacheVar
+ Values[3] = ObjCEmptyVtableVar; // &ObjCEmptyVtableVar
+ Values[4] = ClassRoGV; // &CLASS_RO_GV
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassnfABITy,
+ Values);
+ llvm::GlobalVariable *GV = GetClassGlobal(ClassName);
+ GV->setInitializer(Init);
+ GV->setSection("__DATA, __objc_data");
+ GV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.ClassnfABITy));
+ if (HiddenVisibility)
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ return GV;
+}
+
+bool
+CGObjCNonFragileABIMac::ImplementationIsNonLazy(const ObjCImplDecl *OD) const {
+ return OD->getClassMethod(GetNullarySelector("load")) != 0;
+}
+
+void CGObjCNonFragileABIMac::GetClassSizeInfo(const ObjCImplementationDecl *OID,
+ uint32_t &InstanceStart,
+ uint32_t &InstanceSize) {
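+ // For illustration with hypothetical numbers: if the layout's data size is
+ // 96 bits and the first ivar sits at bit offset 64, InstanceSize is
+ // 96/8 = 12 bytes and InstanceStart is 64/8 = 8 bytes.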
+ const ASTRecordLayout &RL =
+ CGM.getContext().getASTObjCImplementationLayout(OID);
+
+ // InstanceSize is really the instance end, in bytes.
+ InstanceSize = llvm::RoundUpToAlignment(RL.getDataSize(), 8) / 8;
+
+ // If there are no fields, the start is the same as the end.
+ if (!RL.getFieldCount())
+ InstanceStart = InstanceSize;
+ else
+ InstanceStart = RL.getFieldOffset(0) / 8;
+}
+
+void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
+ std::string ClassName = ID->getNameAsString();
+ if (!ObjCEmptyCacheVar) {
+ ObjCEmptyCacheVar = new llvm::GlobalVariable(
+ CGM.getModule(),
+ ObjCTypes.CacheTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "_objc_empty_cache");
+
+ ObjCEmptyVtableVar = new llvm::GlobalVariable(
+ CGM.getModule(),
+ ObjCTypes.ImpnfABITy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "_objc_empty_vtable");
+ }
+ assert(ID->getClassInterface() &&
+ "CGObjCNonFragileABIMac::GenerateClass - class is 0");
+ // FIXME: Is this correct (that meta class size is never computed)?
+ uint32_t InstanceStart =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassnfABITy);
+ uint32_t InstanceSize = InstanceStart;
+ uint32_t flags = CLS_META;
+ std::string ObjCMetaClassName(getMetaclassSymbolPrefix());
+ std::string ObjCClassName(getClassSymbolPrefix());
+
+ llvm::GlobalVariable *SuperClassGV, *IsAGV;
+
+ bool classIsHidden =
+ CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden;
+ if (classIsHidden)
+ flags |= OBJC2_CLS_HIDDEN;
+ if (ID->getNumIvarInitializers())
+ flags |= eClassFlags_ABI2_HasCXXStructors;
+ if (!ID->getClassInterface()->getSuperClass()) {
+ // class is root
+ flags |= CLS_ROOT;
+ SuperClassGV = GetClassGlobal(ObjCClassName + ClassName);
+ IsAGV = GetClassGlobal(ObjCMetaClassName + ClassName);
+ } else {
+ // Has a root. Current class is not a root.
+ const ObjCInterfaceDecl *Root = ID->getClassInterface();
+ while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
+ Root = Super;
+ IsAGV = GetClassGlobal(ObjCMetaClassName + Root->getNameAsString());
+ if (Root->hasAttr<WeakImportAttr>())
+ IsAGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+ // work on super class metadata symbol.
+ std::string SuperClassName =
+ ObjCMetaClassName +
+ ID->getClassInterface()->getSuperClass()->getNameAsString();
+ SuperClassGV = GetClassGlobal(SuperClassName);
+ if (ID->getClassInterface()->getSuperClass()->hasAttr<WeakImportAttr>())
+ SuperClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+ }
+ llvm::GlobalVariable *CLASS_RO_GV = BuildClassRoTInitializer(flags,
+ InstanceStart,
+ InstanceSize,ID);
+ std::string TClassName = ObjCMetaClassName + ClassName;
+ llvm::GlobalVariable *MetaTClass =
+ BuildClassMetaData(TClassName, IsAGV, SuperClassGV, CLASS_RO_GV,
+ classIsHidden);
+ DefinedMetaClasses.push_back(MetaTClass);
+
+ // Metadata for the class
+ flags = CLS;
+ if (classIsHidden)
+ flags |= OBJC2_CLS_HIDDEN;
+ if (ID->getNumIvarInitializers())
+ flags |= eClassFlags_ABI2_HasCXXStructors;
+
+ if (hasObjCExceptionAttribute(CGM.getContext(), ID->getClassInterface()))
+ flags |= CLS_EXCEPTION;
+
+ if (!ID->getClassInterface()->getSuperClass()) {
+ flags |= CLS_ROOT;
+ SuperClassGV = 0;
+ } else {
+ // Has a root. Current class is not a root.
+ std::string RootClassName =
+ ID->getClassInterface()->getSuperClass()->getNameAsString();
+ SuperClassGV = GetClassGlobal(ObjCClassName + RootClassName);
+ if (ID->getClassInterface()->getSuperClass()->hasAttr<WeakImportAttr>())
+ SuperClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+ }
+ GetClassSizeInfo(ID, InstanceStart, InstanceSize);
+ CLASS_RO_GV = BuildClassRoTInitializer(flags,
+ InstanceStart,
+ InstanceSize,
+ ID);
+
+ TClassName = ObjCClassName + ClassName;
+ llvm::GlobalVariable *ClassMD =
+ BuildClassMetaData(TClassName, MetaTClass, SuperClassGV, CLASS_RO_GV,
+ classIsHidden);
+ DefinedClasses.push_back(ClassMD);
+
+ // Determine if this class is also "non-lazy".
+ if (ImplementationIsNonLazy(ID))
+ DefinedNonLazyClasses.push_back(ClassMD);
+
+ // Force the definition of the EHType if necessary.
+ if (flags & CLS_EXCEPTION)
+ GetInterfaceEHType(ID->getClassInterface(), true);
+}
+
+/// GenerateProtocolRef - This routine is called to generate code for
+/// a protocol reference expression; as in:
+/// @code
+/// @protocol(Proto1);
+/// @endcode
+/// It generates a weak reference to l_OBJC_PROTOCOL_REFERENCE_$_Proto1
+/// which will hold the address of the protocol meta-data.
+///
+llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD) {
+
+ // This routine is called for @protocol only. So, we must build the
+ // definition of the protocol's meta-data (not just a reference to it!).
+ //
+ llvm::Constant *Init =
+ llvm::ConstantExpr::getBitCast(GetOrEmitProtocol(PD),
+ ObjCTypes.ExternalProtocolPtrTy);
+
+ std::string ProtocolName("\01l_OBJC_PROTOCOL_REFERENCE_$_");
+ ProtocolName += PD->getName();
+
+ llvm::GlobalVariable *PTGV = CGM.getModule().getGlobalVariable(ProtocolName);
+ if (PTGV)
+ return Builder.CreateLoad(PTGV, "tmp");
+ PTGV = new llvm::GlobalVariable(
+ CGM.getModule(),
+ Init->getType(), false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ Init,
+ ProtocolName);
+ PTGV->setSection("__DATA, __objc_protorefs, coalesced, no_dead_strip");
+ PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ CGM.AddUsedGlobal(PTGV);
+ return Builder.CreateLoad(PTGV, "tmp");
+}
+
+/// GenerateCategory - Build metadata for a category implementation.
+/// struct _category_t {
+/// const char * const name;
+/// struct _class_t *const cls;
+/// const struct _method_list_t * const instance_methods;
+/// const struct _method_list_t * const class_methods;
+/// const struct _protocol_list_t * const protocols;
+/// const struct _prop_list_t * const properties;
+/// }
+///
+void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+ const ObjCInterfaceDecl *Interface = OCD->getClassInterface();
+ const char *Prefix = "\01l_OBJC_$_CATEGORY_";
+ std::string ExtCatName(Prefix + Interface->getNameAsString()+
+ "_$_" + OCD->getNameAsString());
+ std::string ExtClassName(getClassSymbolPrefix() +
+ Interface->getNameAsString());
+
+ std::vector<llvm::Constant*> Values(6);
+ Values[0] = GetClassName(OCD->getIdentifier());
+ // meta-class entry symbol
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ExtClassName);
+ if (Interface->hasAttr<WeakImportAttr>())
+ ClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+
+ Values[1] = ClassGV;
+ std::vector<llvm::Constant*> Methods;
+ std::string MethodListName(Prefix);
+ MethodListName += "INSTANCE_METHODS_" + Interface->getNameAsString() +
+ "_$_" + OCD->getNameAsString();
+
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ i = OCD->instmeth_begin(), e = OCD->instmeth_end(); i != e; ++i) {
+ // Instance methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+
+ Values[2] = EmitMethodList(MethodListName,
+ "__DATA, __objc_const",
+ Methods);
+
+ MethodListName = Prefix;
+ MethodListName += "CLASS_METHODS_" + Interface->getNameAsString() + "_$_" +
+ OCD->getNameAsString();
+ Methods.clear();
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ i = OCD->classmeth_begin(), e = OCD->classmeth_end(); i != e; ++i) {
+ // Class methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+
+ Values[3] = EmitMethodList(MethodListName,
+ "__DATA, __objc_const",
+ Methods);
+ const ObjCCategoryDecl *Category =
+ Interface->FindCategoryDeclaration(OCD->getIdentifier());
+ if (Category) {
+ llvm::SmallString<256> ExtName;
+ llvm::raw_svector_ostream(ExtName) << Interface->getName() << "_$_"
+ << OCD->getName();
+ Values[4] = EmitProtocolList("\01l_OBJC_CATEGORY_PROTOCOLS_$_"
+ + Interface->getName() + "_$_"
+ + Category->getName(),
+ Category->protocol_begin(),
+ Category->protocol_end());
+ Values[5] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
+ OCD, Category, ObjCTypes);
+ } else {
+ Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);
+ Values[5] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ }
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.CategorynfABITy,
+ Values);
+ llvm::GlobalVariable *GCATV
+ = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.CategorynfABITy,
+ false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ ExtCatName);
+ GCATV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.CategorynfABITy));
+ GCATV->setSection("__DATA, __objc_const");
+ CGM.AddUsedGlobal(GCATV);
+ DefinedCategories.push_back(GCATV);
+
+ // Determine if this category is also "non-lazy".
+ if (ImplementationIsNonLazy(OCD))
+ DefinedNonLazyCategories.push_back(GCATV);
+}
+
+/// GetMethodConstant - Return a struct objc_method constant for the
+/// given method if it has been defined. The result is null if the
+/// method has not been defined. The return value has type MethodPtrTy.
+llvm::Constant *CGObjCNonFragileABIMac::GetMethodConstant(
+ const ObjCMethodDecl *MD) {
+ llvm::Function *Fn = GetMethodDefinition(MD);
+ if (!Fn)
+ return 0;
+
+ std::vector<llvm::Constant*> Method(3);
+ Method[0] =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ Method[1] = GetMethodVarType(MD);
+ Method[2] = llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy);
+ return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
+}
+
+/// EmitMethodList - Build meta-data for method declarations
+/// struct _method_list_t {
+/// uint32_t entsize; // sizeof(struct _objc_method)
+/// uint32_t method_count;
+/// struct _objc_method method_list[method_count];
+/// }
+///
+llvm::Constant *CGObjCNonFragileABIMac::EmitMethodList(llvm::Twine Name,
+ const char *Section,
+ const ConstantVector &Methods) {
+ // Return null for empty list.
+ if (Methods.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.MethodListnfABIPtrTy);
+
+ std::vector<llvm::Constant*> Values(3);
+ // sizeof(struct _objc_method)
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.MethodTy);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ // method_count
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
+ Methods.size());
+ Values[2] = llvm::ConstantArray::get(AT, Methods);
+ llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ Name);
+ GV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ GV->setSection(Section);
+ CGM.AddUsedGlobal(GV);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.MethodListnfABIPtrTy);
+}
+
+/// ObjCIvarOffsetVariable - Returns the ivar offset variable for
+/// the given ivar.
+llvm::GlobalVariable *
+CGObjCNonFragileABIMac::ObjCIvarOffsetVariable(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ const ObjCInterfaceDecl *Container = Ivar->getContainingInterface();
+ std::string Name = "OBJC_IVAR_$_" + Container->getNameAsString() +
+ '.' + Ivar->getNameAsString();
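+ // e.g. "OBJC_IVAR_$_MyList._count" for a hypothetical ivar _count declared
+ // in class MyList.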
+ llvm::GlobalVariable *IvarOffsetGV =
+ CGM.getModule().getGlobalVariable(Name);
+ if (!IvarOffsetGV)
+ IvarOffsetGV =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.LongTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ Name);
+ return IvarOffsetGV;
+}
+
+llvm::Constant *
+CGObjCNonFragileABIMac::EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar,
+ unsigned long int Offset) {
+ llvm::GlobalVariable *IvarOffsetGV = ObjCIvarOffsetVariable(ID, Ivar);
+ IvarOffsetGV->setInitializer(llvm::ConstantInt::get(ObjCTypes.LongTy,
+ Offset));
+ IvarOffsetGV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.LongTy));
+
+ // FIXME: This matches gcc, but shouldn't the visibility be set on the use
+ // as well (i.e., in ObjCIvarOffsetVariable)?
+ if (Ivar->getAccessControl() == ObjCIvarDecl::Private ||
+ Ivar->getAccessControl() == ObjCIvarDecl::Package ||
+ CGM.getDeclVisibilityMode(ID) == LangOptions::Hidden)
+ IvarOffsetGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ else
+ IvarOffsetGV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+ IvarOffsetGV->setSection("__DATA, __objc_const");
+ return IvarOffsetGV;
+}
+
+/// EmitIvarList - Emit the ivar list for the given
+/// implementation. The return value has type
+/// IvarListnfABIPtrTy.
+/// struct _ivar_t {
+/// unsigned long int *offset; // pointer to ivar offset location
+/// char *name;
+/// char *type;
+/// uint32_t alignment;
+/// uint32_t size;
+/// }
+/// struct _ivar_list_t {
+/// uint32 entsize; // sizeof(struct _ivar_t)
+/// uint32 count;
+/// struct _ivar_t list[count];
+/// }
+///
+
+llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
+ const ObjCImplementationDecl *ID) {
+
+ std::vector<llvm::Constant*> Ivars, Ivar(5);
+
+ const ObjCInterfaceDecl *OID = ID->getClassInterface();
+ assert(OID && "CGObjCNonFragileABIMac::EmitIvarList - null interface");
+
+ // FIXME. Consolidate this with similar code in GenerateClass.
+
+ // Collect declared and synthesized ivars in a small vector.
+ llvm::SmallVector<ObjCIvarDecl*, 16> OIvars;
+ CGM.getContext().ShallowCollectObjCIvars(OID, OIvars);
+
+ for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
+ ObjCIvarDecl *IVD = OIvars[i];
+ // Ignore unnamed bit-fields.
+ if (!IVD->getDeclName())
+ continue;
+ Ivar[0] = EmitIvarOffsetVar(ID->getClassInterface(), IVD,
+ ComputeIvarBaseOffset(CGM, ID, IVD));
+ Ivar[1] = GetMethodVarName(IVD->getIdentifier());
+ Ivar[2] = GetMethodVarType(IVD);
+ const llvm::Type *FieldTy =
+ CGM.getTypes().ConvertTypeForMem(IVD->getType());
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(FieldTy);
+ unsigned Align = CGM.getContext().getPreferredTypeAlign(
+ IVD->getType().getTypePtr()) >> 3;
+ Align = llvm::Log2_32(Align);
+ Ivar[3] = llvm::ConstantInt::get(ObjCTypes.IntTy, Align);
+ // NOTE. The size of a bitfield does not match gcc's, because of the
+ // way bitfields are treated specially in each compiler. But I am told
+ // that 'size' for bitfield ivars is ignored by the runtime, so it does
+ // not matter. If it ever matters, there is enough info here to get the
+ // bitfield right!
+ Ivar[4] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarnfABITy, Ivar));
+ }
+ // Return null for empty list.
+ if (Ivars.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
+ std::vector<llvm::Constant*> Values(3);
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.IvarnfABITy);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarnfABITy,
+ Ivars.size());
+ Values[2] = llvm::ConstantArray::get(AT, Ivars);
+ llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ const char *Prefix = "\01l_OBJC_$_INSTANCE_VARIABLES_";
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ Prefix + OID->getName());
+ GV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ GV->setSection("__DATA, __objc_const");
+
+ CGM.AddUsedGlobal(GV);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListnfABIPtrTy);
+}
+
+llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocolRef(
+ const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ if (!Entry) {
+ // We use the initializer as a marker of whether this is a forward
+ // reference or not. At module finalization we add the empty
+ // contents for protocols which were referenced but never defined.
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "\01l_OBJC_PROTOCOL_$_" + PD->getName());
+ Entry->setSection("__DATA,__datacoal_nt,coalesced");
+ }
+
+ return Entry;
+}
+
+/// GetOrEmitProtocol - Generate the protocol meta-data:
+/// @code
+/// struct _protocol_t {
+/// id isa; // NULL
+/// const char * const protocol_name;
+/// const struct _protocol_list_t * protocol_list; // super protocols
+/// const struct method_list_t * const instance_methods;
+/// const struct method_list_t * const class_methods;
+/// const struct method_list_t *optionalInstanceMethods;
+/// const struct method_list_t *optionalClassMethods;
+/// const struct _prop_list_t * properties;
+/// const uint32_t size; // sizeof(struct _protocol_t)
+/// const uint32_t flags; // = 0
+/// }
+/// @endcode
+///
+
+llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
+ const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ // Early exit if a defining object has already been generated.
+ if (Entry && Entry->hasInitializer())
+ return Entry;
+
+ // Construct method lists.
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
+ for (ObjCProtocolDecl::instmeth_iterator
+ i = PD->instmeth_begin(), e = PD->instmeth_end(); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptInstanceMethods.push_back(C);
+ } else {
+ InstanceMethods.push_back(C);
+ }
+ }
+
+ for (ObjCProtocolDecl::classmeth_iterator
+ i = PD->classmeth_begin(), e = PD->classmeth_end(); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptClassMethods.push_back(C);
+ } else {
+ ClassMethods.push_back(C);
+ }
+ }
+
+ std::vector<llvm::Constant*> Values(10);
+ // isa is NULL
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.ObjectPtrTy);
+ Values[1] = GetClassName(PD->getIdentifier());
+ Values[2] = EmitProtocolList("\01l_OBJC_$_PROTOCOL_REFS_" + PD->getName(),
+ PD->protocol_begin(),
+ PD->protocol_end());
+
+ Values[3] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_"
+ + PD->getName(),
+ "__DATA, __objc_const",
+ InstanceMethods);
+ Values[4] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_"
+ + PD->getName(),
+ "__DATA, __objc_const",
+ ClassMethods);
+ Values[5] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_OPT_"
+ + PD->getName(),
+ "__DATA, __objc_const",
+ OptInstanceMethods);
+ Values[6] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_OPT_"
+ + PD->getName(),
+ "__DATA, __objc_const",
+ OptClassMethods);
+ Values[7] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + PD->getName(),
+ 0, PD, ObjCTypes);
+ uint32_t Size =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolnfABITy);
+ Values[8] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[9] = llvm::Constant::getNullValue(ObjCTypes.IntTy);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolnfABITy,
+ Values);
+
+ if (Entry) {
+ // Already created, fix the linkage and update the initializer.
+ Entry->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
+ Entry->setInitializer(Init);
+ } else {
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy,
+ false, llvm::GlobalValue::WeakAnyLinkage, Init,
+ "\01l_OBJC_PROTOCOL_$_" + PD->getName());
+ Entry->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.ProtocolnfABITy));
+ Entry->setSection("__DATA,__datacoal_nt,coalesced");
+ }
+ Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ CGM.AddUsedGlobal(Entry);
+
+ // Use this protocol meta-data to build protocol list table in section
+ // __DATA, __objc_protolist
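+ // For a protocol Proto (name illustrative), the metadata global above is
+ // "\01l_OBJC_PROTOCOL_$_Proto" and the list entry emitted here is
+ // "\01l_OBJC_LABEL_PROTOCOL_$_Proto".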
+ llvm::GlobalVariable *PTGV =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABIPtrTy,
+ false, llvm::GlobalValue::WeakAnyLinkage, Entry,
+ "\01l_OBJC_LABEL_PROTOCOL_$_" + PD->getName());
+ PTGV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.ProtocolnfABIPtrTy));
+ PTGV->setSection("__DATA, __objc_protolist, coalesced, no_dead_strip");
+ PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ CGM.AddUsedGlobal(PTGV);
+ return Entry;
+}
+
+/// EmitProtocolList - Generate protocol list meta-data:
+/// @code
+/// struct _protocol_list_t {
+/// long protocol_count; // Note, this is 32/64 bit
+/// struct _protocol_t *[protocol_count];
+/// }
+/// @endcode
+///
+llvm::Constant *
+CGObjCNonFragileABIMac::EmitProtocolList(llvm::Twine Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end) {
+ std::vector<llvm::Constant*> ProtocolRefs;
+
+ // Just return null for empty protocol lists
+ if (begin == end)
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);
+
+ // FIXME: We shouldn't need to do this lookup here, should we?
+ llvm::SmallString<256> TmpName;
+ Name.toVector(TmpName);
+ llvm::GlobalVariable *GV =
+ CGM.getModule().getGlobalVariable(TmpName.str(), true);
+ if (GV)
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListnfABIPtrTy);
+
+ for (; begin != end; ++begin)
+ ProtocolRefs.push_back(GetProtocolRef(*begin)); // Implemented???
+
+ // This list is null terminated.
+ ProtocolRefs.push_back(llvm::Constant::getNullValue(
+ ObjCTypes.ProtocolnfABIPtrTy));
+
+ std::vector<llvm::Constant*> Values(2);
+ Values[0] =
+ llvm::ConstantInt::get(ObjCTypes.LongTy, ProtocolRefs.size() - 1);
+ Values[1] =
+ llvm::ConstantArray::get(
+ llvm::ArrayType::get(ObjCTypes.ProtocolnfABIPtrTy,
+ ProtocolRefs.size()),
+ ProtocolRefs);
+
+ llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ Name);
+ GV->setSection("__DATA, __objc_const");
+ GV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ CGM.AddUsedGlobal(GV);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.ProtocolListnfABIPtrTy);
+}
+
+/// GetMethodDescriptionConstant - This routine builds the following meta-data:
+/// struct _objc_method {
+/// SEL _cmd;
+/// char *method_type;
+/// char *_imp;
+/// }
+
+llvm::Constant *
+CGObjCNonFragileABIMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
+ std::vector<llvm::Constant*> Desc(3);
+ Desc[0] =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ Desc[1] = GetMethodVarType(MD);
+ // Protocol methods have no implementation. So, this entry is always NULL.
+ Desc[2] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Desc);
+}
+
+/// EmitObjCValueForIvar - Code gen for a nonfragile ivar reference.
+/// This amounts to generating code for:
+/// @code
+/// (type *)((char *)base + _OBJC_IVAR_$_.ivar);
+/// @endcode
+///
+LValue CGObjCNonFragileABIMac::EmitObjCValueForIvar(
+ CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ ObjCInterfaceDecl *ID = ObjectTy->getAs<ObjCObjectType>()->getInterface();
+ return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+ EmitIvarOffset(CGF, ID, Ivar));
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitIvarOffset(
+ CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ return CGF.Builder.CreateLoad(ObjCIvarOffsetVariable(Interface, Ivar),"ivar");
+}
+
+CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend(
+ CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs) {
+ // FIXME. Even though IsSuper is passed in, this function does not handle
+ // calls to 'super' receivers.
+ CodeGenTypes &Types = CGM.getTypes();
+ llvm::Value *Arg0 = Receiver;
+ if (!IsSuper)
+ Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy, "tmp");
+
+ // Find the message function name.
+ // FIXME. This is too much work to get the ABI-specific result type needed to
+ // find the message name.
+ const CGFunctionInfo &FnInfo
+ = Types.getFunctionInfo(ResultType, CallArgList(),
+ FunctionType::ExtInfo());
+ llvm::Constant *Fn = 0;
+ std::string Name("\01l_");
+ if (CGM.ReturnTypeUsesSRet(FnInfo)) {
+ if (IsSuper) {
+ Fn = ObjCTypes.getMessageSendSuper2StretFixupFn();
+ Name += "objc_msgSendSuper2_stret_fixup";
+ } else {
+ Fn = ObjCTypes.getMessageSendStretFixupFn();
+ Name += "objc_msgSend_stret_fixup";
+ }
+ } else if (!IsSuper && CGM.ReturnTypeUsesFPRet(ResultType)) {
+ Fn = ObjCTypes.getMessageSendFpretFixupFn();
+ Name += "objc_msgSend_fpret_fixup";
+ } else {
+ if (IsSuper) {
+ Fn = ObjCTypes.getMessageSendSuper2FixupFn();
+ Name += "objc_msgSendSuper2_fixup";
+ } else {
+ Fn = ObjCTypes.getMessageSendFixupFn();
+ Name += "objc_msgSend_fixup";
+ }
+ }
+ assert(Fn && "CGObjCNonFragileABIMac::EmitMessageSend");
+ Name += '_';
+ std::string SelName(Sel.getAsString());
+ // Replace all ':' in selector name with '_' ouch!
+ for (unsigned i = 0; i < SelName.size(); i++)
+ if (SelName[i] == ':')
+ SelName[i] = '_';
+ Name += SelName;
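+ // e.g. for the illustrative selector setObject:forKey: sent through the
+ // ordinary (non-super, non-stret) path, Name is now
+ // "\01l_objc_msgSend_fixup_setObject_forKey_".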
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+ if (!GV) {
+ // Build message ref table entry.
+ std::vector<llvm::Constant*> Values(2);
+ Values[0] = Fn;
+ Values[1] = GetMethodVarName(Sel);
+ llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ Init,
+ Name);
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ GV->setAlignment(16);
+ GV->setSection("__DATA, __objc_msgrefs, coalesced");
+ }
+ llvm::Value *Arg1 = CGF.Builder.CreateBitCast(GV, ObjCTypes.MessageRefPtrTy);
+
+ CallArgList ActualArgs;
+ ActualArgs.push_back(std::make_pair(RValue::get(Arg0), Arg0Ty));
+ ActualArgs.push_back(std::make_pair(RValue::get(Arg1),
+ ObjCTypes.MessageRefCPtrTy));
+ ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+ const CGFunctionInfo &FnInfo1 = Types.getFunctionInfo(ResultType, ActualArgs,
+ FunctionType::ExtInfo());
+ llvm::Value *Callee = CGF.Builder.CreateStructGEP(Arg1, 0);
+ Callee = CGF.Builder.CreateLoad(Callee);
+ const llvm::FunctionType *FTy = Types.GetFunctionType(FnInfo1, true);
+ Callee = CGF.Builder.CreateBitCast(Callee,
+ llvm::PointerType::getUnqual(FTy));
+ return CGF.EmitCall(FnInfo1, Callee, Return, ActualArgs);
+}
+
+/// Generate code for a message send expression in the nonfragile abi.
+CodeGen::RValue
+CGObjCNonFragileABIMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method) {
+ return LegacyDispatchedSelector(Sel)
+ ? EmitLegacyMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs, Method, ObjCTypes)
+ : EmitMessageSend(CGF, Return, ResultType, Sel,
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs);
+}
+
+llvm::GlobalVariable *
+CGObjCNonFragileABIMac::GetClassGlobal(const std::string &Name) {
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+
+ if (!GV) {
+ GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABITy,
+ false, llvm::GlobalValue::ExternalLinkage,
+ 0, Name);
+ }
+
+ return GV;
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ llvm::GlobalVariable *&Entry = ClassReferences[ID->getIdentifier()];
+
+ if (!Entry) {
+ std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+ false, llvm::GlobalValue::InternalLinkage,
+ ClassGV,
+ "\01L_OBJC_CLASSLIST_REFERENCES_$_");
+ Entry->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(
+ ObjCTypes.ClassnfABIPtrTy));
+ Entry->setSection("__DATA, __objc_classrefs, regular, no_dead_strip");
+ CGM.AddUsedGlobal(Entry);
+ }
+
+ return Builder.CreateLoad(Entry, "tmp");
+}
+
+llvm::Value *
+CGObjCNonFragileABIMac::EmitSuperClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ llvm::GlobalVariable *&Entry = SuperClassReferences[ID->getIdentifier()];
+
+ if (!Entry) {
+ std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+ false, llvm::GlobalValue::InternalLinkage,
+ ClassGV,
+ "\01L_OBJC_CLASSLIST_SUP_REFS_$_");
+ Entry->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(
+ ObjCTypes.ClassnfABIPtrTy));
+ Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
+ CGM.AddUsedGlobal(Entry);
+ }
+
+ return Builder.CreateLoad(Entry, "tmp");
+}
+
+/// EmitMetaClassRef - Return a Value * of the address of _class_t
+/// meta-data
+///
+llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ llvm::GlobalVariable * &Entry = MetaClassReferences[ID->getIdentifier()];
+ if (Entry)
+ return Builder.CreateLoad(Entry, "tmp");
+
+ std::string MetaClassName(getMetaclassSymbolPrefix() + ID->getNameAsString());
+ llvm::GlobalVariable *MetaClassGV = GetClassGlobal(MetaClassName);
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ MetaClassGV,
+ "\01L_OBJC_CLASSLIST_SUP_REFS_$_");
+ Entry->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(
+ ObjCTypes.ClassnfABIPtrTy));
+
+ Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
+ CGM.AddUsedGlobal(Entry);
+
+ return Builder.CreateLoad(Entry, "tmp");
+}
+
+/// GetClass - Return a reference to the class for the given interface
+/// decl.
+llvm::Value *CGObjCNonFragileABIMac::GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ if (ID->hasAttr<WeakImportAttr>()) {
+ std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
+ ClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+ }
+
+ return EmitClassRef(Builder, ID);
+}
+
+/// Generates a message send where the super is the receiver. This is
+/// a message send to self with special delivery semantics indicating
+/// which class's method should be called.
+CodeGen::RValue
+CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CodeGen::CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ // ...
+ // Create and init a super structure; this is a (receiver, class)
+ // pair we will pass to objc_msgSendSuper.
+ llvm::Value *ObjCSuper =
+ CGF.Builder.CreateAlloca(ObjCTypes.SuperTy, 0, "objc_super");
+
+ llvm::Value *ReceiverAsObject =
+ CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateStore(ReceiverAsObject,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 0));
+
+  // If this is a class message, the metaclass is passed as the target.
+ llvm::Value *Target;
+ if (IsClassMessage) {
+ if (isCategoryImpl) {
+      // Message sent to 'super' in a class method defined in
+      // a category implementation.
+ Target = EmitClassRef(CGF.Builder, Class);
+ Target = CGF.Builder.CreateStructGEP(Target, 0);
+ Target = CGF.Builder.CreateLoad(Target);
+ } else
+ Target = EmitMetaClassRef(CGF.Builder, Class);
+ } else
+ Target = EmitSuperClassRef(CGF.Builder, Class);
+
+ // FIXME: We shouldn't need to do this cast, rectify the ASTContext and
+ // ObjCTypes types.
+ const llvm::Type *ClassTy =
+ CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
+ Target = CGF.Builder.CreateBitCast(Target, ClassTy);
+ CGF.Builder.CreateStore(Target,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 1));
+
+ return (LegacyDispatchedSelector(Sel))
+ ? EmitLegacyMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ ObjCSuper, ObjCTypes.SuperPtrCTy,
+ true, CallArgs, Method, ObjCTypes)
+ : EmitMessageSend(CGF, Return, ResultType, Sel,
+ ObjCSuper, ObjCTypes.SuperPtrCTy,
+ true, CallArgs);
+}
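+
+// For illustration (hypothetical Objective-C source), a super dispatch such as
+//
+//   @implementation Child
+//   - (void)greet { [super greet]; }
+//   @end
+//
+// is lowered roughly as: allocate an objc_super pair on the stack, store the
+// receiver (self) in slot 0 and a class reference in slot 1, then dispatch
+// through the runtime's super-send entry point with that pair and the selector.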
+
+llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CGBuilderTy &Builder,
+ Selector Sel, bool lval) {
+ llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
+
+ if (!Entry) {
+ llvm::Constant *Casted =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
+ ObjCTypes.SelectorPtrTy);
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.SelectorPtrTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Casted, "\01L_OBJC_SELECTOR_REFERENCES_");
+ Entry->setSection("__DATA, __objc_selrefs, literal_pointers, no_dead_strip");
+ CGM.AddUsedGlobal(Entry);
+ }
+
+ if (lval)
+ return Entry;
+ return Builder.CreateLoad(Entry, "tmp");
+}
+/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
+/// objc_assign_ivar (id src, id *dst, ptrdiff_t)
+///
+void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src,
+ llvm::Value *dst,
+ llvm::Value *ivarOffset) {
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall3(ObjCTypes.getGcAssignIvarFn(),
+ src, dst, ivarOffset);
+ return;
+}
+
+/// EmitObjCStrongCastAssign - Code gen for assigning to a __strong cast object.
+/// objc_assign_strongCast (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignStrongCastFn(),
+ src, dst, "weakassign");
+ return;
+}
+
+void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ llvm::Value *Size) {
+ SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
+ DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
+ CGF.Builder.CreateCall3(ObjCTypes.GcMemmoveCollectableFn(),
+ DestPtr, SrcPtr, Size);
+ return;
+}
+
+/// EmitObjCWeakRead - Code gen for loading value of a __weak
+/// object: objc_read_weak (id *src)
+///
+llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj) {
+ const llvm::Type* DestTy =
+ cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
+ AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj, ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *read_weak = CGF.Builder.CreateCall(ObjCTypes.getGcReadWeakFn(),
+ AddrWeakObj, "weakread");
+ read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
+ return read_weak;
+}
+
+/// EmitObjCWeakAssign - Code gen for assigning to a __weak object.
+/// objc_assign_weak (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignWeakFn(),
+ src, dst, "weakassign");
+ return;
+}
+
+/// EmitObjCGlobalAssign - Code gen for assigning to a __strong object.
+/// objc_assign_global (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst,
+ bool threadlocal) {
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ if (!threadlocal)
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignGlobalFn(),
+ src, dst, "globalassign");
+ else
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignThreadLocalFn(),
+ src, dst, "threadlocalassign");
+ return;
+}
+
+namespace {
+ struct CallSyncExit : EHScopeStack::Cleanup {
+ llvm::Value *SyncExitFn;
+ llvm::Value *SyncArg;
+ CallSyncExit(llvm::Value *SyncExitFn, llvm::Value *SyncArg)
+ : SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
+ CGF.Builder.CreateCall(SyncExitFn, SyncArg)->setDoesNotThrow();
+ }
+ };
+}
+
+void
+CGObjCNonFragileABIMac::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) {
+ // Evaluate the lock operand. This should dominate the cleanup.
+ llvm::Value *SyncArg = CGF.EmitScalarExpr(S.getSynchExpr());
+
+ // Acquire the lock.
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg)
+ ->setDoesNotThrow();
+
+ // Register an all-paths cleanup to release the lock.
+ CGF.EHStack.pushCleanup<CallSyncExit>(NormalAndEHCleanup,
+ ObjCTypes.getSyncExitFn(),
+ SyncArg);
+
+ // Emit the body of the statement.
+ CGF.EmitStmt(S.getSynchBody());
+
+ // Pop the lock-release cleanup.
+ CGF.PopCleanupBlock();
+}
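+
+// Conceptually, "@synchronized (obj) { body }" is emitted roughly as:
+//
+//   id lock = obj;
+//   objc_sync_enter(lock);
+//   body;                   // any exit path, normal or exceptional,
+//   objc_sync_exit(lock);   // runs the cleanup pushed above, i.e. sync_exit.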
+
+namespace {
+ struct CatchHandler {
+ const VarDecl *Variable;
+ const Stmt *Body;
+ llvm::BasicBlock *Block;
+ llvm::Value *TypeInfo;
+ };
+
+ struct CallObjCEndCatch : EHScopeStack::Cleanup {
+ CallObjCEndCatch(bool MightThrow, llvm::Value *Fn) :
+ MightThrow(MightThrow), Fn(Fn) {}
+ bool MightThrow;
+ llvm::Value *Fn;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ if (!MightThrow) {
+ CGF.Builder.CreateCall(Fn)->setDoesNotThrow();
+ return;
+ }
+
+ CGF.EmitCallOrInvoke(Fn, 0, 0);
+ }
+ };
+}
+
+llvm::Constant *
+CGObjCNonFragileABIMac::GetEHType(QualType T) {
+ // There's a particular fixed type info for 'id'.
+ if (T->isObjCIdType() ||
+ T->isObjCQualifiedIdType()) {
+ llvm::Constant *IDEHType =
+ CGM.getModule().getGlobalVariable("OBJC_EHTYPE_id");
+ if (!IDEHType)
+ IDEHType =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, "OBJC_EHTYPE_id");
+ return IDEHType;
+ }
+
+ // All other types should be Objective-C interface pointer types.
+ const ObjCObjectPointerType *PT =
+ T->getAs<ObjCObjectPointerType>();
+ assert(PT && "Invalid @catch type.");
+ const ObjCInterfaceType *IT = PT->getInterfaceType();
+ assert(IT && "Invalid @catch type.");
+ return GetInterfaceEHType(IT->getDecl(), false);
+}
+
+void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) {
+ // Jump destination for falling out of catch bodies.
+ CodeGenFunction::JumpDest Cont;
+ if (S.getNumCatchStmts())
+ Cont = CGF.getJumpDestInCurrentScope("eh.cont");
+
+ CodeGenFunction::FinallyInfo FinallyInfo;
+ if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
+ FinallyInfo = CGF.EnterFinallyBlock(Finally->getFinallyBody(),
+ ObjCTypes.getObjCBeginCatchFn(),
+ ObjCTypes.getObjCEndCatchFn(),
+ ObjCTypes.getExceptionRethrowFn());
+
+ llvm::SmallVector<CatchHandler, 8> Handlers;
+
+ // Enter the catch, if there is one.
+ if (S.getNumCatchStmts()) {
+ for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
+ const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
+
+ Handlers.push_back(CatchHandler());
+ CatchHandler &Handler = Handlers.back();
+ Handler.Variable = CatchDecl;
+ Handler.Body = CatchStmt->getCatchBody();
+ Handler.Block = CGF.createBasicBlock("catch");
+
+ // @catch(...) always matches.
+ if (!CatchDecl) {
+ Handler.TypeInfo = 0; // catch-all
+ // Don't consider any other catches.
+ break;
+ }
+
+ Handler.TypeInfo = GetEHType(CatchDecl->getType());
+ }
+
+ EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
+ Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
+ }
+
+ // Emit the try body.
+ CGF.EmitStmt(S.getTryBody());
+
+ // Leave the try.
+ if (S.getNumCatchStmts())
+ CGF.EHStack.popCatch();
+
+ // Remember where we were.
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
+
+ // Emit the handlers.
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
+ CatchHandler &Handler = Handlers[I];
+
+ CGF.EmitBlock(Handler.Block);
+ llvm::Value *RawExn = CGF.Builder.CreateLoad(CGF.getExceptionSlot());
+
+ // Enter the catch.
+ llvm::CallInst *Exn =
+ CGF.Builder.CreateCall(ObjCTypes.getObjCBeginCatchFn(), RawExn,
+ "exn.adjusted");
+ Exn->setDoesNotThrow();
+
+ // Add a cleanup to leave the catch.
+ bool EndCatchMightThrow = (Handler.Variable == 0);
+ CGF.EHStack.pushCleanup<CallObjCEndCatch>(NormalAndEHCleanup,
+ EndCatchMightThrow,
+ ObjCTypes.getObjCEndCatchFn());
+
+ // Bind the catch parameter if it exists.
+ if (const VarDecl *CatchParam = Handler.Variable) {
+ const llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
+ llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);
+
+ CGF.EmitLocalBlockVarDecl(*CatchParam);
+ CGF.Builder.CreateStore(CastExn, CGF.GetAddrOfLocalVar(CatchParam));
+ }
+
+ CGF.ObjCEHValueStack.push_back(Exn);
+ CGF.EmitStmt(Handler.Body);
+ CGF.ObjCEHValueStack.pop_back();
+
+ // Leave the earlier cleanup.
+ CGF.PopCleanupBlock();
+
+ CGF.EmitBranchThroughCleanup(Cont);
+ }
+
+ // Go back to the try-statement fallthrough.
+ CGF.Builder.restoreIP(SavedIP);
+
+ // Pop out of the normal cleanup on the finally.
+ if (S.getFinallyStmt())
+ CGF.ExitFinallyBlock(FinallyInfo);
+
+ if (Cont.isValid())
+ CGF.EmitBlock(Cont.getBlock());
+}
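+
+// Rough shape of the emitted code for @try { ... } @catch (T *e) { ... }:
+//
+//   try body (EH edges route matching exceptions to the catch block)
+//   catch:
+//     exn.adjusted = objc_begin_catch(raw exception)
+//     bind e (if declared), emit the handler body
+//     objc_end_catch()       // via the cleanup pushed above
+//   eh.cont:                 // join point after the handlers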
+
+/// EmitThrowStmt - Generate code for a throw statement.
+void CGObjCNonFragileABIMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) {
+ llvm::Value *Exception;
+ llvm::Constant *FunctionThrowOrRethrow;
+ if (const Expr *ThrowExpr = S.getThrowExpr()) {
+ Exception = CGF.EmitScalarExpr(ThrowExpr);
+ FunctionThrowOrRethrow = ObjCTypes.getExceptionThrowFn();
+ } else {
+ assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+ "Unexpected rethrow outside @catch block.");
+ Exception = CGF.ObjCEHValueStack.back();
+ FunctionThrowOrRethrow = ObjCTypes.getExceptionRethrowFn();
+ }
+
+ llvm::Value *ExceptionAsObject =
+ CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy, "tmp");
+ llvm::BasicBlock *InvokeDest = CGF.getInvokeDest();
+ if (InvokeDest) {
+ CGF.Builder.CreateInvoke(FunctionThrowOrRethrow,
+ CGF.getUnreachableBlock(), InvokeDest,
+ &ExceptionAsObject, &ExceptionAsObject + 1);
+ } else {
+ CGF.Builder.CreateCall(FunctionThrowOrRethrow, ExceptionAsObject)
+ ->setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+ }
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ CGF.Builder.ClearInsertionPoint();
+}
+
+llvm::Constant *
+CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
+ bool ForDefinition) {
+ llvm::GlobalVariable * &Entry = EHTypeReferences[ID->getIdentifier()];
+
+  // If we don't need a definition, return the entry if found, or check
+  // whether an external reference should be used.
+ if (!ForDefinition) {
+ if (Entry)
+ return Entry;
+
+ // If this type (or a super class) has the __objc_exception__
+ // attribute, emit an external reference.
+ if (hasObjCExceptionAttribute(CGM.getContext(), ID))
+ return Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ ("OBJC_EHTYPE_$_" +
+ ID->getIdentifier()->getName()));
+ }
+
+ // Otherwise we need to either make a new entry or fill in the
+ // initializer.
+ assert((!Entry || !Entry->hasInitializer()) && "Duplicate EHType definition");
+ std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ std::string VTableName = "objc_ehtype_vtable";
+ llvm::GlobalVariable *VTableGV =
+ CGM.getModule().getGlobalVariable(VTableName);
+ if (!VTableGV)
+ VTableGV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.Int8PtrTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, VTableName);
+
+ llvm::Value *VTableIdx =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 2);
+
+ std::vector<llvm::Constant*> Values(3);
+ Values[0] = llvm::ConstantExpr::getGetElementPtr(VTableGV, &VTableIdx, 1);
+ Values[1] = GetClassName(ID->getIdentifier());
+ Values[2] = GetClassGlobal(ClassName);
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.EHTypeTy, Values);
+
+ if (Entry) {
+ Entry->setInitializer(Init);
+ } else {
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy, false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ Init,
+ ("OBJC_EHTYPE_$_" +
+ ID->getIdentifier()->getName()));
+ }
+
+ if (CGM.getLangOptions().getVisibilityMode() == LangOptions::Hidden)
+ Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ Entry->setAlignment(CGM.getTargetData().getABITypeAlignment(
+ ObjCTypes.EHTypeTy));
+
+ if (ForDefinition) {
+ Entry->setSection("__DATA,__objc_const");
+ Entry->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ } else {
+ Entry->setSection("__DATA,__datacoal_nt,coalesced");
+ }
+
+ return Entry;
+}
+
+/* *** */
+
+CodeGen::CGObjCRuntime *
+CodeGen::CreateMacObjCRuntime(CodeGen::CodeGenModule &CGM) {
+ return new CGObjCMac(CGM);
+}
+
+CodeGen::CGObjCRuntime *
+CodeGen::CreateMacNonFragileABIObjCRuntime(CodeGen::CodeGenModule &CGM) {
+ return new CGObjCNonFragileABIMac(CGM);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
new file mode 100644
index 0000000..584760f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
@@ -0,0 +1,234 @@
+//===----- CGObjCRuntime.h - Interface to ObjC Runtimes ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for Objective-C code generation. Concrete
+// subclasses of this implement code generation for specific Objective-C
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_OBJCRUNTIME_H
+#define CLANG_CODEGEN_OBJCRUNTIME_H
+#include "clang/Basic/IdentifierTable.h" // Selector
+#include "clang/AST/DeclObjC.h"
+#include <string>
+
+#include "CGBuilder.h"
+#include "CGCall.h"
+#include "CGValue.h"
+
+namespace llvm {
+ class Constant;
+ class Function;
+ class Module;
+ class StructLayout;
+ class StructType;
+ class Type;
+ class Value;
+}
+
+namespace clang {
+namespace CodeGen {
+ class CodeGenFunction;
+}
+
+ class FieldDecl;
+ class ObjCAtTryStmt;
+ class ObjCAtThrowStmt;
+ class ObjCAtSynchronizedStmt;
+ class ObjCContainerDecl;
+ class ObjCCategoryImplDecl;
+ class ObjCImplementationDecl;
+ class ObjCInterfaceDecl;
+ class ObjCMessageExpr;
+ class ObjCMethodDecl;
+ class ObjCProtocolDecl;
+ class Selector;
+ class ObjCIvarDecl;
+ class ObjCStringLiteral;
+ class BlockDeclRefExpr;
+
+namespace CodeGen {
+ class CodeGenModule;
+
+// FIXME: Several methods should be pure virtual but aren't, to avoid breaking
+// the partially-implemented subclass.
+
+/// Implements runtime-specific code generation functions.
+class CGObjCRuntime {
+protected:
+ // Utility functions for unified ivar access. These need to
+ // eventually be folded into other places (the structure layout
+ // code).
+
+ /// Compute an offset to the given ivar, suitable for passing to
+ /// EmitValueForIvarAtOffset. Note that the correct handling of
+  /// bit-fields is carefully coordinated by these two; use caution!
+  ///
+  /// The latter overload is suitable for computing the offset of a
+  /// synthesized ivar.
+ uint64_t ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *Ivar);
+ uint64_t ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCImplementationDecl *OID,
+ const ObjCIvarDecl *Ivar);
+
+ LValue EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *OID,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers,
+ llvm::Value *Offset);
+
+public:
+ virtual ~CGObjCRuntime();
+
+ /// Generate the function required to register all Objective-C components in
+ /// this compilation unit with the runtime library.
+ virtual llvm::Function *ModuleInitFunction() = 0;
+
+ /// Get a selector for the specified name and type values. The
+ /// return value should have the LLVM type for pointer-to
+ /// ASTContext::getObjCSelType().
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ Selector Sel, bool lval=false) = 0;
+
+ /// Get a typed selector.
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ const ObjCMethodDecl *Method) = 0;
+
+ /// Get the type constant to catch for the given ObjC pointer type.
+ /// This is used externally to implement catching ObjC types in C++.
+ /// Runtimes which don't support this should add the appropriate
+ /// error to Sema.
+ virtual llvm::Constant *GetEHType(QualType T) = 0;
+
+ /// Generate a constant string object.
+ virtual llvm::Constant *GenerateConstantString(const StringLiteral *) = 0;
+
+ /// Generate a category. A category contains a list of methods (and
+ /// accompanying metadata) and a list of protocols.
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *OCD) = 0;
+
+  /// Generate a class structure for this class.
+ virtual void GenerateClass(const ObjCImplementationDecl *OID) = 0;
+
+ /// Generate an Objective-C message send operation.
+ ///
+  /// \param Method - The method being called; this may be null if synthesizing
+ /// a property setter or getter.
+ virtual CodeGen::RValue
+ GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot ReturnSlot,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class = 0,
+ const ObjCMethodDecl *Method = 0) = 0;
+
+ /// Generate an Objective-C message send operation to the super
+ /// class initiated in a method for Class and with the given Self
+ /// object.
+ ///
+  /// \param Method - The method being called; this may be null if synthesizing
+ /// a property setter or getter.
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot ReturnSlot,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Self,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method = 0) = 0;
+
+ /// Emit the code to return the named protocol as an object, as in a
+ /// @protocol expression.
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *OPD) = 0;
+
+ /// Generate the named protocol. Protocols contain method metadata but no
+ /// implementations.
+ virtual void GenerateProtocol(const ObjCProtocolDecl *OPD) = 0;
+
+ /// Generate a function preamble for a method with the specified
+ /// types.
+
+  // FIXME: Currently this just generates the Function definition, but really this
+ // should also be generating the loads of the parameters, as the runtime
+ // should have full control over how parameters are passed.
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) = 0;
+
+ /// Return the runtime function for getting properties.
+ virtual llvm::Constant *GetPropertyGetFunction() = 0;
+
+ /// Return the runtime function for setting properties.
+ virtual llvm::Constant *GetPropertySetFunction() = 0;
+
+ // API for atomic copying of qualified aggregates in setter/getter.
+ virtual llvm::Constant *GetCopyStructFunction() = 0;
+
+ /// GetClass - Return a reference to the class for the given
+ /// interface decl.
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *OID) = 0;
+
+ /// EnumerationMutationFunction - Return the function that's called by the
+ /// compiler when a mutation is detected during foreach iteration.
+ virtual llvm::Constant *EnumerationMutationFunction() = 0;
+
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) = 0;
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) = 0;
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) = 0;
+ virtual llvm::Value *EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj) = 0;
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest) = 0;
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ bool threadlocal=false) = 0;
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ llvm::Value *ivarOffset) = 0;
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest) = 0;
+
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) = 0;
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) = 0;
+ virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ llvm::Value *Size) = 0;
+ virtual llvm::Constant *GCBlockLayout(CodeGen::CodeGenFunction &CGF,
+ const llvm::SmallVectorImpl<const BlockDeclRefExpr *> &) = 0;
+
+};
+
+/// Creates an instance of an Objective-C runtime class.
+// TODO: This should include some way of selecting which runtime to target.
+CGObjCRuntime *CreateGNUObjCRuntime(CodeGenModule &CGM);
+CGObjCRuntime *CreateMacObjCRuntime(CodeGenModule &CGM);
+CGObjCRuntime *CreateMacNonFragileABIObjCRuntime(CodeGenModule &CGM);
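+
+// A CodeGenModule instantiates exactly one runtime; illustrative selection
+// logic (option and variable names are approximate):
+//
+//   if (!LangOpts.NeXTRuntime)
+//     Runtime = CreateGNUObjCRuntime(CGM);
+//   else if (LangOpts.ObjCNonFragileABI)
+//     Runtime = CreateMacNonFragileABIObjCRuntime(CGM);
+//   else
+//     Runtime = CreateMacObjCRuntime(CGM);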
+}
+}
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
new file mode 100644
index 0000000..60df613
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
@@ -0,0 +1,953 @@
+//===--- CGCXXRTTI.cpp - Emit LLVM Code for C++ RTTI descriptors ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of RTTI descriptors.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "CGCXXABI.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/Type.h"
+#include "clang/Frontend/CodeGenOptions.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+class RTTIBuilder {
+ CodeGenModule &CGM; // Per-module state.
+ llvm::LLVMContext &VMContext;
+
+ const llvm::Type *Int8PtrTy;
+
+ /// Fields - The fields of the RTTI descriptor currently being built.
+ llvm::SmallVector<llvm::Constant *, 16> Fields;
+
+ /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
+ /// descriptor of the given type.
+ llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
+
+ /// BuildVTablePointer - Build the vtable pointer for the given type.
+ void BuildVTablePointer(const Type *Ty);
+
+ /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
+ /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
+ void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
+
+ /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
+ /// classes with bases that do not satisfy the abi::__si_class_type_info
+  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
+ void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
+
+ /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
+ /// for pointer types.
+ void BuildPointerTypeInfo(QualType PointeeTy);
+
+ /// BuildObjCObjectTypeInfo - Build the appropriate kind of
+ /// type_info for an object type.
+ void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
+
+ /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
+ /// struct, used for member pointer types.
+ void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
+
+public:
+ RTTIBuilder(CodeGenModule &cgm)
+ : CGM(cgm), VMContext(cgm.getModule().getContext()),
+ Int8PtrTy(llvm::Type::getInt8PtrTy(VMContext)) { }
+
+ llvm::Constant *BuildName(QualType Ty, bool Hidden,
+ llvm::GlobalVariable::LinkageTypes Linkage) {
+ llvm::SmallString<256> OutName;
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, OutName);
+ llvm::StringRef Name = OutName.str();
+
+ llvm::GlobalVariable *OGV = CGM.getModule().getNamedGlobal(Name);
+ if (OGV && !OGV->isDeclaration())
+ return llvm::ConstantExpr::getBitCast(OGV, Int8PtrTy);
+
+ llvm::Constant *C = llvm::ConstantArray::get(VMContext, Name.substr(4));
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), C->getType(), true, Linkage,
+ C, Name);
+ if (OGV) {
+ GV->takeName(OGV);
+ llvm::Constant *NewPtr = llvm::ConstantExpr::getBitCast(GV,
+ OGV->getType());
+ OGV->replaceAllUsesWith(NewPtr);
+ OGV->eraseFromParent();
+ }
+ if (Hidden)
+ GV->setVisibility(llvm::GlobalVariable::HiddenVisibility);
+ return llvm::ConstantExpr::getBitCast(GV, Int8PtrTy);
+ }
+
+ // FIXME: unify with DecideExtern
+ bool DecideHidden(QualType Ty) {
+ // For this type, see if all components are never hidden.
+ if (const MemberPointerType *MPT = Ty->getAs<MemberPointerType>())
+ return (DecideHidden(MPT->getPointeeType())
+ && DecideHidden(QualType(MPT->getClass(), 0)));
+ if (const PointerType *PT = Ty->getAs<PointerType>())
+ return DecideHidden(PT->getPointeeType());
+ if (const FunctionType *FT = Ty->getAs<FunctionType>()) {
+ if (DecideHidden(FT->getResultType()) == false)
+ return false;
+ if (const FunctionProtoType *FPT = Ty->getAs<FunctionProtoType>()) {
+ for (unsigned i = 0; i <FPT->getNumArgs(); ++i)
+ if (DecideHidden(FPT->getArgType(i)) == false)
+ return false;
+ for (unsigned i = 0; i <FPT->getNumExceptions(); ++i)
+ if (DecideHidden(FPT->getExceptionType(i)) == false)
+ return false;
+ return true;
+ }
+ }
+ if (const RecordType *RT = Ty->getAs<RecordType>())
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ return CGM.getDeclVisibilityMode(RD) == LangOptions::Hidden;
+ return false;
+ }
+
+ // Pointer type info flags.
+ enum {
+ /// PTI_Const - Type has const qualifier.
+ PTI_Const = 0x1,
+
+ /// PTI_Volatile - Type has volatile qualifier.
+ PTI_Volatile = 0x2,
+
+ /// PTI_Restrict - Type has restrict qualifier.
+ PTI_Restrict = 0x4,
+
+ /// PTI_Incomplete - Type is incomplete.
+ PTI_Incomplete = 0x8,
+
+ /// PTI_ContainingClassIncomplete - Containing class is incomplete.
+ /// (in pointer to member).
+ PTI_ContainingClassIncomplete = 0x10
+ };
+
+ // VMI type info flags.
+ enum {
+ /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
+ VMI_NonDiamondRepeat = 0x1,
+
+ /// VMI_DiamondShaped - Class is diamond shaped.
+ VMI_DiamondShaped = 0x2
+ };
+
+ // Base class type info flags.
+ enum {
+ /// BCTI_Virtual - Base class is virtual.
+ BCTI_Virtual = 0x1,
+
+ /// BCTI_Public - Base class is public.
+ BCTI_Public = 0x2
+ };
+
+ /// BuildTypeInfo - Build the RTTI type info struct for the given type.
+ ///
+ /// \param Force - true to force the creation of this RTTI value
+ /// \param ForEH - true if this is for exception handling
+ llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false);
+};
+}
+
+llvm::Constant *RTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
+ // Mangle the RTTI name.
+ llvm::SmallString<256> OutName;
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, OutName);
+ llvm::StringRef Name = OutName.str();
+
+ // Look for an existing global.
+ llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
+
+ if (!GV) {
+ // Create a new global variable.
+ GV = new llvm::GlobalVariable(CGM.getModule(), Int8PtrTy, /*Constant=*/true,
+ llvm::GlobalValue::ExternalLinkage, 0, Name);
+ }
+
+ return llvm::ConstantExpr::getBitCast(GV, Int8PtrTy);
+}
+
+/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
+/// info for that type is defined in the standard library.
+static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
+ // Itanium C++ ABI 2.9.2:
+ // Basic type information (e.g. for "int", "bool", etc.) will be kept in
+ // the run-time support library. Specifically, the run-time support
+ // library should contain type_info objects for the types X, X* and
+ // X const*, for every X in: void, bool, wchar_t, char, unsigned char,
+ // signed char, short, unsigned short, int, unsigned int, long,
+ // unsigned long, long long, unsigned long long, float, double, long double,
+ // char16_t, char32_t, and the IEEE 754r decimal and half-precision
+ // floating point types.
+ switch (Ty->getKind()) {
+ case BuiltinType::Void:
+ case BuiltinType::Bool:
+ case BuiltinType::WChar:
+ case BuiltinType::Char_U:
+ case BuiltinType::Char_S:
+ case BuiltinType::UChar:
+ case BuiltinType::SChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ case BuiltinType::LongDouble:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::Int128:
+ case BuiltinType::UInt128:
+ return true;
+
+ case BuiltinType::Overload:
+ case BuiltinType::Dependent:
+ case BuiltinType::UndeducedAuto:
+ assert(false && "Should not see this type here!");
+
+ case BuiltinType::NullPtr:
+ assert(false && "FIXME: nullptr_t is not handled!");
+
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ assert(false && "FIXME: Objective-C types are unsupported!");
+ }
+
+  // Silence gcc.
+ return false;
+}
+
+static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
+ QualType PointeeTy = PointerTy->getPointeeType();
+ const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
+ if (!BuiltinTy)
+ return false;
+
+ // Check the qualifiers.
+ Qualifiers Quals = PointeeTy.getQualifiers();
+ Quals.removeConst();
+
+ if (!Quals.empty())
+ return false;
+
+ return TypeInfoIsInStandardLibrary(BuiltinTy);
+}
+
+/// IsStandardLibraryRTTIDescriptor - Returns whether the type
+/// information for the given type exists in the standard library.
+static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
+ // Type info for builtin types is defined in the standard library.
+ if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
+ return TypeInfoIsInStandardLibrary(BuiltinTy);
+
+ // Type info for some pointer types to builtin types is defined in the
+ // standard library.
+ if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
+ return TypeInfoIsInStandardLibrary(PointerTy);
+
+ return false;
+}
+
+/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
+/// the given type exists elsewhere, meaning we should not emit the type
+/// information in this translation unit. Assumes that it is not a
+/// standard-library type.
+static bool ShouldUseExternalRTTIDescriptor(ASTContext &Context,
+ QualType Ty) {
+ // If RTTI is disabled, don't consider key functions.
+ if (!Context.getLangOptions().RTTI) return false;
+
+ if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (!RD->hasDefinition())
+ return false;
+
+ if (!RD->isDynamicClass())
+ return false;
+
+ // Get the key function.
+ const CXXMethodDecl *KeyFunction = Context.getKeyFunction(RD);
+ if (KeyFunction && !KeyFunction->hasBody()) {
+ // The class has a key function, but it is not defined in this translation
+ // unit, so we should use the external descriptor for it.
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/// IsIncompleteClassType - Returns whether the given record type is incomplete.
+static bool IsIncompleteClassType(const RecordType *RecordTy) {
+ return !RecordTy->getDecl()->isDefinition();
+}
+
+/// ContainsIncompleteClassType - Returns whether the given type contains an
+/// incomplete class type. This is true if
+///
+/// * The given type is an incomplete class type.
+/// * The given type is a pointer type whose pointee type contains an
+/// incomplete class type.
+/// * The given type is a member pointer type whose class is an incomplete
+/// class type.
+/// * The given type is a member pointer type whose pointee type contains an
+/// incomplete class type.
+static bool ContainsIncompleteClassType(QualType Ty) {
+ if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
+ if (IsIncompleteClassType(RecordTy))
+ return true;
+ }
+
+ if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
+ return ContainsIncompleteClassType(PointerTy->getPointeeType());
+
+ if (const MemberPointerType *MemberPointerTy =
+ dyn_cast<MemberPointerType>(Ty)) {
+ // Check if the class type is incomplete.
+ const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
+ if (IsIncompleteClassType(ClassType))
+ return true;
+
+ return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
+ }
+
+ return false;
+}
+
+/// getTypeInfoLinkage - Return the linkage that the type info and type info
+/// name constants should have for the given type.
+static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(QualType Ty) {
+ // Itanium C++ ABI 2.9.5p7:
+ // In addition, it and all of the intermediate abi::__pointer_type_info
+ // structs in the chain down to the abi::__class_type_info for the
+ // incomplete class type must be prevented from resolving to the
+ // corresponding type_info structs for the complete class type, possibly
+ // by making them local static objects. Finally, a dummy class RTTI is
+ // generated for the incomplete type that will not resolve to the final
+ // complete class RTTI (because the latter need not exist), possibly by
+ // making it a local static object.
+ if (ContainsIncompleteClassType(Ty))
+ return llvm::GlobalValue::InternalLinkage;
+
+ switch (Ty->getLinkage()) {
+ case NoLinkage:
+ case InternalLinkage:
+ case UniqueExternalLinkage:
+ return llvm::GlobalValue::InternalLinkage;
+
+ case ExternalLinkage:
+ if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
+ if (RD->isDynamicClass())
+ return CodeGenModule::getVTableLinkage(RD);
+ }
+
+ return llvm::GlobalValue::WeakODRLinkage;
+ }
+
+ return llvm::GlobalValue::WeakODRLinkage;
+}
+
+// CanUseSingleInheritance - Return whether the given record decl has a "single,
+// public, non-virtual base at offset zero (i.e. the derived class is dynamic
+// iff the base is)", according to the Itanium C++ ABI, 2.9.5p6b.
+static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
+ // Check the number of bases.
+ if (RD->getNumBases() != 1)
+ return false;
+
+ // Get the base.
+ CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
+
+ // Check that the base is not virtual.
+ if (Base->isVirtual())
+ return false;
+
+ // Check that the base is public.
+ if (Base->getAccessSpecifier() != AS_public)
+ return false;
+
+ // Check that the class is dynamic iff the base is.
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ if (!BaseDecl->isEmpty() &&
+ BaseDecl->isDynamicClass() != RD->isDynamicClass())
+ return false;
+
+ return true;
+}
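+
+// Example: given
+//
+//   class A { virtual void f(); };
+//   class B : public A { };
+//
+// B satisfies these constraints, so its type_info is an
+// abi::__si_class_type_info; adding a second base, a virtual base, or a
+// non-public base would force abi::__vmi_class_type_info instead.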
+
+void RTTIBuilder::BuildVTablePointer(const Type *Ty) {
+ // abi::__class_type_info.
+ static const char * const ClassTypeInfo =
+ "_ZTVN10__cxxabiv117__class_type_infoE";
+ // abi::__si_class_type_info.
+ static const char * const SIClassTypeInfo =
+ "_ZTVN10__cxxabiv120__si_class_type_infoE";
+ // abi::__vmi_class_type_info.
+ static const char * const VMIClassTypeInfo =
+ "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
+
+ const char *VTableName = 0;
+
+ switch (Ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ assert(false && "Non-canonical and dependent types shouldn't get here");
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ assert(false && "References shouldn't get here");
+
+ case Type::Builtin:
+ // GCC treats vector and complex types as fundamental types.
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::Complex:
+ // FIXME: GCC treats block pointers as fundamental types?!
+ case Type::BlockPointer:
+ // abi::__fundamental_type_info.
+ VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
+ break;
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ // abi::__array_type_info.
+ VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
+ break;
+
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ // abi::__function_type_info.
+ VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
+ break;
+
+ case Type::Enum:
+ // abi::__enum_type_info.
+ VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
+ break;
+
+ case Type::Record: {
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
+
+ if (!RD->hasDefinition() || !RD->getNumBases()) {
+ VTableName = ClassTypeInfo;
+ } else if (CanUseSingleInheritance(RD)) {
+ VTableName = SIClassTypeInfo;
+ } else {
+ VTableName = VMIClassTypeInfo;
+ }
+
+ break;
+ }
+
+ case Type::ObjCObject:
+ // Ignore protocol qualifiers.
+ Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
+
+ // Handle id and Class.
+ if (isa<BuiltinType>(Ty)) {
+ VTableName = ClassTypeInfo;
+ break;
+ }
+
+ assert(isa<ObjCInterfaceType>(Ty));
+ // Fall through.
+
+ case Type::ObjCInterface:
+ if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
+ VTableName = SIClassTypeInfo;
+ } else {
+ VTableName = ClassTypeInfo;
+ }
+ break;
+
+ case Type::ObjCObjectPointer:
+ case Type::Pointer:
+ // abi::__pointer_type_info.
+ VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
+ break;
+
+ case Type::MemberPointer:
+ // abi::__pointer_to_member_type_info.
+ VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
+ break;
+ }
+
+ llvm::Constant *VTable =
+ CGM.getModule().getOrInsertGlobal(VTableName, Int8PtrTy);
+
+ const llvm::Type *PtrDiffTy =
+ CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+
+  // The vtable address point is at index 2, past the offset-to-top and RTTI fields.
+ llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
+ VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, &Two, 1);
+ VTable = llvm::ConstantExpr::getBitCast(VTable, Int8PtrTy);
+
+ Fields.push_back(VTable);
+}
+
+llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
+ // We want to operate on the canonical type.
+ Ty = CGM.getContext().getCanonicalType(Ty);
+
+ // Check if we've already emitted an RTTI descriptor for this type.
+ llvm::SmallString<256> OutName;
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, OutName);
+ llvm::StringRef Name = OutName.str();
+
+ llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
+ if (OldGV && !OldGV->isDeclaration())
+ return llvm::ConstantExpr::getBitCast(OldGV, Int8PtrTy);
+
+ // Check if there is already an external RTTI descriptor for this type.
+ bool IsStdLib = IsStandardLibraryRTTIDescriptor(Ty);
+ if (!Force &&
+ (IsStdLib || ShouldUseExternalRTTIDescriptor(CGM.getContext(), Ty)))
+ return GetAddrOfExternalRTTIDescriptor(Ty);
+
+  // Standard-library type info is emitted with external linkage.
+ llvm::GlobalVariable::LinkageTypes Linkage;
+ if (IsStdLib)
+ Linkage = llvm::GlobalValue::ExternalLinkage;
+ else
+ Linkage = getTypeInfoLinkage(Ty);
+
+ // Add the vtable pointer.
+ BuildVTablePointer(cast<Type>(Ty));
+
+ // And the name.
+ Fields.push_back(BuildName(Ty, DecideHidden(Ty), Linkage));
+
+ switch (Ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ assert(false && "Non-canonical and dependent types shouldn't get here");
+
+ // GCC treats vector types as fundamental types.
+ case Type::Builtin:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::Complex:
+ case Type::BlockPointer:
+ // Itanium C++ ABI 2.9.5p4:
+ // abi::__fundamental_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ assert(false && "References shouldn't get here");
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ // Itanium C++ ABI 2.9.5p5:
+ // abi::__array_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ // Itanium C++ ABI 2.9.5p5:
+ // abi::__function_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::Enum:
+ // Itanium C++ ABI 2.9.5p5:
+ // abi::__enum_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::Record: {
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
+ if (!RD->hasDefinition() || !RD->getNumBases()) {
+ // We don't need to emit any fields.
+ break;
+ }
+
+ if (CanUseSingleInheritance(RD))
+ BuildSIClassTypeInfo(RD);
+ else
+ BuildVMIClassTypeInfo(RD);
+
+ break;
+ }
+
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
+ break;
+
+ case Type::ObjCObjectPointer:
+ BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
+ break;
+
+ case Type::Pointer:
+ BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
+ break;
+
+ case Type::MemberPointer:
+ BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
+ break;
+ }
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(VMContext, &Fields[0], Fields.size(),
+ /*Packed=*/false);
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
+ /*Constant=*/true, Linkage, Init, Name);
+
+ // If there's already an old global variable, replace it with the new one.
+ if (OldGV) {
+ GV->takeName(OldGV);
+ llvm::Constant *NewPtr =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtr);
+ OldGV->eraseFromParent();
+ }
+
+ // GCC only relies on the uniqueness of the type names, not the
+ // type_infos themselves, so we can emit these as hidden symbols.
+ // But don't do this if we're worried about strict visibility
+ // compatibility.
+ if (const RecordType *RT = dyn_cast<RecordType>(Ty))
+ CGM.setTypeVisibility(GV, cast<CXXRecordDecl>(RT->getDecl()),
+ /*ForRTTI*/ true);
+ else if (CGM.getCodeGenOpts().HiddenWeakVTables &&
+ Linkage == llvm::GlobalValue::WeakODRLinkage)
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+
+ return llvm::ConstantExpr::getBitCast(GV, Int8PtrTy);
+}
+
+/// ComputeQualifierFlags - Compute the pointer type info flags from the
+/// given qualifier.
+static unsigned ComputeQualifierFlags(Qualifiers Quals) {
+ unsigned Flags = 0;
+
+ if (Quals.hasConst())
+ Flags |= RTTIBuilder::PTI_Const;
+ if (Quals.hasVolatile())
+ Flags |= RTTIBuilder::PTI_Volatile;
+ if (Quals.hasRestrict())
+ Flags |= RTTIBuilder::PTI_Restrict;
+
+ return Flags;
+}
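+
+// For example, the pointee qualifiers of 'const volatile int *' yield
+// PTI_Const | PTI_Volatile == 0x3.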
+
+/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
+/// for the given Objective-C object type.
+void RTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
+ // Drop qualifiers.
+ const Type *T = OT->getBaseType().getTypePtr();
+ assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
+
+ // The builtin types are abi::__class_type_infos and don't require
+ // extra fields.
+ if (isa<BuiltinType>(T)) return;
+
+ ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
+ ObjCInterfaceDecl *Super = Class->getSuperClass();
+
+ // Root classes are also __class_type_info.
+ if (!Super) return;
+
+ QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
+
+ // Everything else is single inheritance.
+ llvm::Constant *BaseTypeInfo = RTTIBuilder(CGM).BuildTypeInfo(SuperTy);
+ Fields.push_back(BaseTypeInfo);
+}
+
+/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
+/// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
+void RTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
+ // Itanium C++ ABI 2.9.5p6b:
+ // It adds to abi::__class_type_info a single member pointing to the
+ // type_info structure for the base type,
+ llvm::Constant *BaseTypeInfo =
+ RTTIBuilder(CGM).BuildTypeInfo(RD->bases_begin()->getType());
+ Fields.push_back(BaseTypeInfo);
+}
+
+/// SeenBases - Contains virtual and non-virtual bases seen when traversing
+/// a class hierarchy.
+struct SeenBases {
+ llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
+ llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
+};
+
+/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
+/// abi::__vmi_class_type_info.
+///
+static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
+ SeenBases &Bases) {
+
+ unsigned Flags = 0;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ if (Base->isVirtual()) {
+ if (Bases.VirtualBases.count(BaseDecl)) {
+ // If this virtual base has been seen before, then the class is diamond
+ // shaped.
+ Flags |= RTTIBuilder::VMI_DiamondShaped;
+ } else {
+ if (Bases.NonVirtualBases.count(BaseDecl))
+ Flags |= RTTIBuilder::VMI_NonDiamondRepeat;
+
+ // Mark the virtual base as seen.
+ Bases.VirtualBases.insert(BaseDecl);
+ }
+ } else {
+ if (Bases.NonVirtualBases.count(BaseDecl)) {
+ // If this non-virtual base has been seen before, then the class has non-
+ // diamond shaped repeated inheritance.
+ Flags |= RTTIBuilder::VMI_NonDiamondRepeat;
+ } else {
+ if (Bases.VirtualBases.count(BaseDecl))
+ Flags |= RTTIBuilder::VMI_NonDiamondRepeat;
+
+ // Mark the non-virtual base as seen.
+ Bases.NonVirtualBases.insert(BaseDecl);
+ }
+ }
+
+ // Walk all bases.
+ for (CXXRecordDecl::base_class_const_iterator I = BaseDecl->bases_begin(),
+ E = BaseDecl->bases_end(); I != E; ++I)
+ Flags |= ComputeVMIClassTypeInfoFlags(I, Bases);
+
+ return Flags;
+}
+
+static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
+ unsigned Flags = 0;
+ SeenBases Bases;
+
+ // Walk all bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I)
+ Flags |= ComputeVMIClassTypeInfoFlags(I, Bases);
+
+ return Flags;
+}
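+
+// Example: for the classic diamond
+//
+//   struct A { virtual ~A(); };
+//   struct B : virtual A { };
+//   struct C : virtual A { };
+//   struct D : B, C { };
+//
+// walking D's bases sees the virtual base A twice, so D's flags include
+// VMI_DiamondShaped; repeating a non-virtual base instead would set
+// VMI_NonDiamondRepeat.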
+
+/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
+/// classes with bases that do not satisfy the abi::__si_class_type_info
+/// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
+void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
+ const llvm::Type *UnsignedIntLTy =
+ CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
+
+ // Itanium C++ ABI 2.9.5p6c:
+ // __flags is a word with flags describing details about the class
+ // structure, which may be referenced by using the __flags_masks
+ // enumeration. These flags refer to both direct and indirect bases.
+ unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
+ Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
+
+ // Itanium C++ ABI 2.9.5p6c:
+ // __base_count is a word with the number of direct proper base class
+ // descriptions that follow.
+ Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
+
+ if (!RD->getNumBases())
+ return;
+
+ const llvm::Type *LongLTy =
+ CGM.getTypes().ConvertType(CGM.getContext().LongTy);
+
+ // Now add the base class descriptions.
+
+ // Itanium C++ ABI 2.9.5p6c:
+ // __base_info[] is an array of base class descriptions -- one for every
+ // direct proper base. Each description is of the type:
+ //
+ // struct abi::__base_class_type_info {
+ // public:
+ // const __class_type_info *__base_type;
+ // long __offset_flags;
+ //
+ // enum __offset_flags_masks {
+ // __virtual_mask = 0x1,
+ // __public_mask = 0x2,
+ // __offset_shift = 8
+ // };
+ // };
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXBaseSpecifier *Base = I;
+
+ // The __base_type member points to the RTTI for the base type.
+ Fields.push_back(RTTIBuilder(CGM).BuildTypeInfo(Base->getType()));
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ int64_t OffsetFlags = 0;
+
+ // All but the lower 8 bits of __offset_flags are a signed offset.
+ // For a non-virtual base, this is the offset in the object of the base
+ // subobject. For a virtual base, this is the offset in the virtual table of
+ // the virtual base offset for the virtual base referenced (negative).
+ if (Base->isVirtual())
+ OffsetFlags = CGM.getVTables().getVirtualBaseOffsetOffset(RD, BaseDecl);
+ else {
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+ OffsetFlags = Layout.getBaseClassOffset(BaseDecl) / 8;
+    }
+
+ OffsetFlags <<= 8;
+
+ // The low-order byte of __offset_flags contains flags, as given by the
+ // masks from the enumeration __offset_flags_masks.
+ if (Base->isVirtual())
+ OffsetFlags |= BCTI_Virtual;
+ if (Base->getAccessSpecifier() == AS_public)
+ OffsetFlags |= BCTI_Public;
+
+ Fields.push_back(llvm::ConstantInt::get(LongLTy, OffsetFlags));
+ }
+}
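+
+// Worked example of __offset_flags: a public, non-virtual base that lives at
+// byte offset 8 in the derived object is encoded as
+// (8 << 8) | BCTI_Public == 0x802.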
+
+/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
+/// used for pointer types.
+void RTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
+ Qualifiers Quals;
+ QualType UnqualifiedPointeeTy =
+ CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals);
+
+ // Itanium C++ ABI 2.9.5p7:
+ // __flags is a flag word describing the cv-qualification and other
+ // attributes of the type pointed to
+ unsigned Flags = ComputeQualifierFlags(Quals);
+
+ // Itanium C++ ABI 2.9.5p7:
+ // When the abi::__pbase_type_info is for a direct or indirect pointer to an
+ // incomplete class type, the incomplete target type flag is set.
+ if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
+ Flags |= PTI_Incomplete;
+
+ const llvm::Type *UnsignedIntLTy =
+ CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
+ Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
+
+ // Itanium C++ ABI 2.9.5p7:
+ // __pointee is a pointer to the std::type_info derivation for the
+ // unqualified type being pointed to.
+ llvm::Constant *PointeeTypeInfo =
+ RTTIBuilder(CGM).BuildTypeInfo(UnqualifiedPointeeTy);
+ Fields.push_back(PointeeTypeInfo);
+}
+
+/// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
+/// struct, used for member pointer types.
+void RTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
+ QualType PointeeTy = Ty->getPointeeType();
+
+ Qualifiers Quals;
+ QualType UnqualifiedPointeeTy =
+ CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals);
+
+ // Itanium C++ ABI 2.9.5p7:
+ // __flags is a flag word describing the cv-qualification and other
+ // attributes of the type pointed to.
+ unsigned Flags = ComputeQualifierFlags(Quals);
+
+ const RecordType *ClassType = cast<RecordType>(Ty->getClass());
+
+ // Itanium C++ ABI 2.9.5p7:
+ // When the abi::__pbase_type_info is for a direct or indirect pointer to an
+ // incomplete class type, the incomplete target type flag is set.
+ if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
+ Flags |= PTI_Incomplete;
+
+ if (IsIncompleteClassType(ClassType))
+ Flags |= PTI_ContainingClassIncomplete;
+
+ const llvm::Type *UnsignedIntLTy =
+ CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
+ Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
+
+ // Itanium C++ ABI 2.9.5p7:
+ // __pointee is a pointer to the std::type_info derivation for the
+ // unqualified type being pointed to.
+ llvm::Constant *PointeeTypeInfo =
+ RTTIBuilder(CGM).BuildTypeInfo(UnqualifiedPointeeTy);
+ Fields.push_back(PointeeTypeInfo);
+
+ // Itanium C++ ABI 2.9.5p9:
+ // __context is a pointer to an abi::__class_type_info corresponding to the
+ // class type containing the member pointed to
+ // (e.g., the "A" in "int A::*").
+ Fields.push_back(RTTIBuilder(CGM).BuildTypeInfo(QualType(ClassType, 0)));
+}
+
+llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
+ bool ForEH) {
+ // Return a bogus pointer if RTTI is disabled, unless it's for EH.
+ // FIXME: should we even be calling this method if RTTI is disabled
+ // and it's not for EH?
+ if (!ForEH && !getContext().getLangOptions().RTTI) {
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ return llvm::Constant::getNullValue(Int8PtrTy);
+ }
+
+ return RTTIBuilder(*this).BuildTypeInfo(Ty);
+}
+
+void CodeGenModule::EmitFundamentalRTTIDescriptor(QualType Type) {
+ QualType PointerType = Context.getPointerType(Type);
+ QualType PointerTypeConst = Context.getPointerType(Type.withConst());
+ RTTIBuilder(*this).BuildTypeInfo(Type, true);
+ RTTIBuilder(*this).BuildTypeInfo(PointerType, true);
+ RTTIBuilder(*this).BuildTypeInfo(PointerTypeConst, true);
+}
+
+void CodeGenModule::EmitFundamentalRTTIDescriptors() {
+ QualType FundamentalTypes[] = { Context.VoidTy, Context.Char32Ty,
+ Context.Char16Ty, Context.UnsignedLongLongTy,
+ Context.LongLongTy, Context.WCharTy,
+ Context.UnsignedShortTy, Context.ShortTy,
+ Context.UnsignedLongTy, Context.LongTy,
+ Context.UnsignedIntTy, Context.IntTy,
+ Context.UnsignedCharTy, Context.FloatTy,
+ Context.LongDoubleTy, Context.DoubleTy,
+ Context.CharTy, Context.BoolTy,
+ Context.SignedCharTy };
+ for (unsigned i = 0; i < sizeof(FundamentalTypes)/sizeof(QualType); ++i)
+ EmitFundamentalRTTIDescriptor(FundamentalTypes[i]);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h
new file mode 100644
index 0000000..9b4e9f8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h
@@ -0,0 +1,238 @@
+//===--- CGRecordLayout.h - LLVM Record Layout Information ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGRECORDLAYOUT_H
+#define CLANG_CODEGEN_CGRECORDLAYOUT_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "clang/AST/Decl.h"
+namespace llvm {
+ class raw_ostream;
+ class Type;
+}
+
+namespace clang {
+namespace CodeGen {
+
+/// \brief Helper object for describing how to generate the code for access to a
+/// bit-field.
+///
+/// This structure is intended to describe the "policy" of how the bit-field
+/// should be accessed, which may be target, language, or ABI dependent.
+class CGBitFieldInfo {
+public:
+ /// Descriptor for a single component of a bit-field access. The entire
+ /// bit-field value is the bitwise OR of all of the individual components.
+ ///
+ /// Each component describes an accessed value, which is how the component
+ /// should be transferred to/from memory, and a target placement, which is how
+ /// that component fits into the constituted bit-field. The pseudo-IR for a
+ /// load is:
+ ///
+ /// %0 = gep %base, 0, FieldIndex
+ /// %1 = gep (i8*) %0, FieldByteOffset
+ /// %2 = (i(AccessWidth) *) %1
+ /// %3 = load %2, align AccessAlignment
+ /// %4 = shr %3, FieldBitStart
+ ///
+ /// and the composed bit-field is formed as the bitwise OR of all accesses,
+ /// masked to TargetBitWidth bits and shifted to TargetBitOffset.
+ struct AccessInfo {
+ /// Offset of the field to load in the LLVM structure, if any.
+ unsigned FieldIndex;
+
+ /// Byte offset from the field address, if any. This should generally be
+ /// unused as the cleanest IR comes from having a well-constructed LLVM type
+ /// with proper GEP instructions, but sometimes its use is required, for
+ /// example if an access is intended to straddle an LLVM field boundary.
+ unsigned FieldByteOffset;
+
+ /// Bit offset in the accessed value to use. The width is implied by \see
+ /// TargetBitWidth.
+ unsigned FieldBitStart;
+
+ /// Bit width of the memory access to perform.
+ unsigned AccessWidth;
+
+ /// The alignment of the memory access, or 0 if the default alignment should
+ /// be used.
+ //
+ // FIXME: Remove use of 0 to encode default, instead have IRgen do the right
+ // thing when it generates the code, if avoiding align directives is
+ // desired.
+ unsigned AccessAlignment;
+
+ /// Offset for the target value.
+ unsigned TargetBitOffset;
+
+ /// Number of bits in the access that are destined for the bit-field.
+ unsigned TargetBitWidth;
+ };
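+
+ // Illustrative composition (a sketch, not normative IR): with two access
+ // components C0 and C1, the loaded bit-field value V is reassembled roughly
+ // as
+ //
+ //   V = (((load C0) >> C0.FieldBitStart) & mask(C0.TargetBitWidth))
+ //         << C0.TargetBitOffset
+ //     | (((load C1) >> C1.FieldBitStart) & mask(C1.TargetBitWidth))
+ //         << C1.TargetBitOffset
+ //
+ // where mask(N) keeps the low N bits, followed by a sign extension if the
+ // bit-field is signed.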
+
+private:
+ /// The components to use to access the bit-field. We may need up to three
+ /// separate components to support up to i64 bit-field access (4 + 2 + 1 byte
+ /// accesses).
+ //
+ // FIXME: De-hardcode this, just allocate following the struct.
+ AccessInfo Components[3];
+
+ /// The total size of the bit-field, in bits.
+ unsigned Size;
+
+ /// The number of access components to use.
+ unsigned NumComponents;
+
+ /// Whether the bit-field is signed.
+ bool IsSigned : 1;
+
+public:
+ CGBitFieldInfo(unsigned Size, unsigned NumComponents, AccessInfo *_Components,
+ bool IsSigned) : Size(Size), NumComponents(NumComponents),
+ IsSigned(IsSigned) {
+ assert(NumComponents <= 3 && "invalid number of components!");
+ for (unsigned i = 0; i != NumComponents; ++i)
+ Components[i] = _Components[i];
+
+ // Check some invariants.
+ unsigned AccessedSize = 0;
+ for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
+ const AccessInfo &AI = getComponent(i);
+ AccessedSize += AI.TargetBitWidth;
+
+ // We shouldn't try to load 0 bits.
+ assert(AI.TargetBitWidth > 0);
+
+ // We can't load more bits than we accessed.
+ assert(AI.FieldBitStart + AI.TargetBitWidth <= AI.AccessWidth);
+
+ // We shouldn't put any bits outside the result size.
+ assert(AI.TargetBitWidth + AI.TargetBitOffset <= Size);
+ }
+
+ // Check that the total number of target bits matches the total bit-field
+ // size.
+ assert(AccessedSize == Size && "Total size does not match accessed size!");
+ }
+
+public:
+ /// \brief Check whether this bit-field access is signed (i.e., should be
+ /// sign extended on loads).
+ bool isSigned() const { return IsSigned; }
+
+ /// \brief Get the size of the bit-field, in bits.
+ unsigned getSize() const { return Size; }
+
+ /// @name Component Access
+ /// @{
+
+ unsigned getNumComponents() const { return NumComponents; }
+
+ const AccessInfo &getComponent(unsigned Index) const {
+ assert(Index < getNumComponents() && "Invalid access!");
+ return Components[Index];
+ }
+
+ /// @}
+
+ void print(llvm::raw_ostream &OS) const;
+ void dump() const;
+
+ /// \brief Given a bit-field decl, build an appropriate helper object for
+ /// accessing that field (which is expected to have the given offset and
+ /// size).
+ static CGBitFieldInfo MakeInfo(class CodeGenTypes &Types, const FieldDecl *FD,
+ uint64_t FieldOffset, uint64_t FieldSize);
+
+ /// \brief Given a bit-field decl, build an appropriate helper object for
+ /// accessing that field (which is expected to have the given offset and
+ /// size). The field decl should be known to be contained within a type of at
+ /// least the given size and with the given alignment.
+ static CGBitFieldInfo MakeInfo(CodeGenTypes &Types, const FieldDecl *FD,
+ uint64_t FieldOffset, uint64_t FieldSize,
+ uint64_t ContainingTypeSizeInBits,
+ unsigned ContainingTypeAlign);
+};
+
+/// CGRecordLayout - This class handles struct and union layout info while
+/// lowering AST types to LLVM types.
+///
+/// These layout objects are only created on demand as IR generation requires.
+class CGRecordLayout {
+ friend class CodeGenTypes;
+
+ CGRecordLayout(const CGRecordLayout&); // DO NOT IMPLEMENT
+ void operator=(const CGRecordLayout&); // DO NOT IMPLEMENT
+
+private:
+ /// The LLVMType corresponding to this record layout.
+ const llvm::Type *LLVMType;
+
+ /// Map from (non-bit-field) struct field to the corresponding llvm struct
+ /// type field no. This info is populated by record builder.
+ llvm::DenseMap<const FieldDecl *, unsigned> FieldInfo;
+
+ /// Map from (bit-field) struct field to the corresponding llvm struct type
+ /// field no. This info is populated by record builder.
+ llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
+
+ // FIXME: Maybe we could use a CXXBaseSpecifier as the key and use a single
+ // map for both virtual and non virtual bases.
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBaseFields;
+
+ /// Whether one of the fields in this record layout is a pointer to data
+ /// member, or a struct that contains a pointer to data member.
+ bool IsZeroInitializable : 1;
+
+public:
+ CGRecordLayout(const llvm::Type *T, bool IsZeroInitializable)
+ : LLVMType(T), IsZeroInitializable(IsZeroInitializable) {}
+
+ /// \brief Return the LLVM type associated with this record.
+ const llvm::Type *getLLVMType() const {
+ return LLVMType;
+ }
+
+ /// \brief Check whether this struct can be C++ zero-initialized
+ /// with a zeroinitializer.
+ bool isZeroInitializable() const {
+ return IsZeroInitializable;
+ }
+
+ /// \brief Return llvm::StructType element number that corresponds to the
+ /// field FD.
+ unsigned getLLVMFieldNo(const FieldDecl *FD) const {
+ assert(!FD->isBitField() && "Invalid call for bit-field decl!");
+ assert(FieldInfo.count(FD) && "Invalid field for record!");
+ return FieldInfo.lookup(FD);
+ }
+
+ unsigned getNonVirtualBaseLLVMFieldNo(const CXXRecordDecl *RD) const {
+ assert(NonVirtualBaseFields.count(RD) && "Invalid non-virtual base!");
+ return NonVirtualBaseFields.lookup(RD);
+ }
+
+ /// \brief Return the BitFieldInfo that corresponds to the field FD.
+ const CGBitFieldInfo &getBitFieldInfo(const FieldDecl *FD) const {
+ assert(FD->isBitField() && "Invalid call for non bit-field decl!");
+ llvm::DenseMap<const FieldDecl *, CGBitFieldInfo>::const_iterator
+ it = BitFields.find(FD);
+ assert(it != BitFields.end() && "Unable to find bitfield info");
+ return it->second;
+ }
+
+ void print(llvm::raw_ostream &OS) const;
+ void dump() const;
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
new file mode 100644
index 0000000..77a319f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -0,0 +1,793 @@
+//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Builder implementation for CGRecordLayout objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGRecordLayout.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "CodeGenTypes.h"
+#include "CGCXXABI.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Type.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+namespace clang {
+namespace CodeGen {
+
+class CGRecordLayoutBuilder {
+public:
+ /// FieldTypes - Holds the LLVM types that the struct is created from.
+ std::vector<const llvm::Type *> FieldTypes;
+
+ /// LLVMFieldInfo - Holds a field and its corresponding LLVM field number.
+ typedef std::pair<const FieldDecl *, unsigned> LLVMFieldInfo;
+ llvm::SmallVector<LLVMFieldInfo, 16> LLVMFields;
+
+ /// LLVMBitFieldInfo - Holds location and size information about a bit field.
+ typedef std::pair<const FieldDecl *, CGBitFieldInfo> LLVMBitFieldInfo;
+ llvm::SmallVector<LLVMBitFieldInfo, 16> LLVMBitFields;
+
+ typedef std::pair<const CXXRecordDecl *, unsigned> LLVMBaseInfo;
+ llvm::SmallVector<LLVMBaseInfo, 16> LLVMNonVirtualBases;
+
+ /// IsZeroInitializable - Whether this struct can be C++
+ /// zero-initialized with an LLVM zeroinitializer.
+ bool IsZeroInitializable;
+
+ /// Packed - Whether the resulting LLVM struct will be packed or not.
+ bool Packed;
+
+private:
+ CodeGenTypes &Types;
+
+ /// Alignment - Contains the alignment of the RecordDecl.
+ //
+ // FIXME: This is not needed and should be removed.
+ unsigned Alignment;
+
+ /// AlignmentAsLLVMStruct - Will contain the maximum alignment of all the
+ /// LLVM types.
+ unsigned AlignmentAsLLVMStruct;
+
+ /// BitsAvailableInLastField - If a bit field spans only part of an LLVM field,
+ /// this will have the number of bits still available in the field.
+ char BitsAvailableInLastField;
+
+ /// NextFieldOffsetInBytes - Holds the next field offset in bytes.
+ uint64_t NextFieldOffsetInBytes;
+
+ /// LayoutUnionField - Will layout a field in a union and return the type
+ /// that the field will have.
+ const llvm::Type *LayoutUnionField(const FieldDecl *Field,
+ const ASTRecordLayout &Layout);
+
+ /// LayoutUnion - Will layout a union RecordDecl.
+ void LayoutUnion(const RecordDecl *D);
+
+ /// LayoutFields - try to layout all fields in the record decl.
+ /// Returns false if the operation failed because the struct is not packed.
+ bool LayoutFields(const RecordDecl *D);
+
+ /// LayoutNonVirtualBase - layout a single non-virtual base.
+ void LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
+ uint64_t BaseOffset);
+
+ /// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
+ void LayoutNonVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout);
+
+ /// LayoutField - layout a single field. Returns false if the operation failed
+ /// because the current struct is not packed.
+ bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);
+
+ /// LayoutBitField - layout a single bit field.
+ void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);
+
+ /// AppendField - Appends a field with the given offset and type.
+ void AppendField(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);
+
+ /// AppendPadding - Appends enough padding bytes so that the total
+ /// struct size is a multiple of the field alignment.
+ void AppendPadding(uint64_t FieldOffsetInBytes, unsigned FieldAlignment);
+
+ /// AppendBytes - Append a given number of bytes to the record.
+ void AppendBytes(uint64_t NumBytes);
+
+ /// AppendTailPadding - Append enough tail padding so that the type will have
+ /// the passed size.
+ void AppendTailPadding(uint64_t RecordSize);
+
+ unsigned getTypeAlignment(const llvm::Type *Ty) const;
+
+ /// CheckZeroInitializable - Check if the given type contains a pointer
+ /// to data member.
+ void CheckZeroInitializable(QualType T);
+ void CheckZeroInitializable(const CXXRecordDecl *RD);
+
+public:
+ CGRecordLayoutBuilder(CodeGenTypes &Types)
+ : IsZeroInitializable(true), Packed(false), Types(Types),
+ Alignment(0), AlignmentAsLLVMStruct(1),
+ BitsAvailableInLastField(0), NextFieldOffsetInBytes(0) { }
+
+ /// Layout - Will layout a RecordDecl.
+ void Layout(const RecordDecl *D);
+};
+
+}
+}
+
+void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
+ Alignment = Types.getContext().getASTRecordLayout(D).getAlignment() / 8;
+ Packed = D->hasAttr<PackedAttr>();
+
+ if (D->isUnion()) {
+ LayoutUnion(D);
+ return;
+ }
+
+ if (LayoutFields(D))
+ return;
+
+ // We weren't able to lay out the struct. Try again with a packed struct.
+ Packed = true;
+ AlignmentAsLLVMStruct = 1;
+ NextFieldOffsetInBytes = 0;
+ FieldTypes.clear();
+ LLVMFields.clear();
+ LLVMBitFields.clear();
+ LLVMNonVirtualBases.clear();
+
+ LayoutFields(D);
+}
+
+CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
+ const FieldDecl *FD,
+ uint64_t FieldOffset,
+ uint64_t FieldSize,
+ uint64_t ContainingTypeSizeInBits,
+ unsigned ContainingTypeAlign) {
+ const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(FD->getType());
+ uint64_t TypeSizeInBytes = Types.getTargetData().getTypeAllocSize(Ty);
+ uint64_t TypeSizeInBits = TypeSizeInBytes * 8;
+
+ bool IsSigned = FD->getType()->isSignedIntegerType();
+
+ if (FieldSize > TypeSizeInBits) {
+ // We have a wide bit-field. The extra bits are only used for padding, so
+ // if we have a bitfield of type T, with size N:
+ //
+ // T t : N;
+ //
+ // We can just assume that it's:
+ //
+ // T t : sizeof(T);
+ //
+ FieldSize = TypeSizeInBits;
+ }
+
+ // Compute the access components. The policy we use is to start by attempting
+ // to access using the width of the bit-field type itself and to always access
+ // at aligned indices of that type. If such an access would fail because it
+ // extends past the bound of the type, then we reduce size to the next smaller
+ // power of two and retry. The current algorithm assumes pow2 sized types,
+ // although this is easy to fix.
+ //
+ // FIXME: This algorithm is wrong on big-endian systems, I think.
+ assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
+ CGBitFieldInfo::AccessInfo Components[3];
+ unsigned NumComponents = 0;
+ unsigned AccessedTargetBits = 0; // The number of target bits accessed.
+ unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.
+
+ // Round down from the field offset to find the first access position that is
+ // at an aligned offset of the initial access type.
+ uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);
+
+ // Adjust initial access size to fit within record.
+ while (AccessWidth > 8 &&
+ AccessStart + AccessWidth > ContainingTypeSizeInBits) {
+ AccessWidth >>= 1;
+ AccessStart = FieldOffset - (FieldOffset % AccessWidth);
+ }
+
+ while (AccessedTargetBits < FieldSize) {
+ // Check that we can access using a type of this size, without reading off
+ // the end of the structure. This can occur with packed structures and
+ // -fno-bitfield-type-align, for example.
+ if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
+ // If so, reduce access size to the next smaller power-of-two and retry.
+ AccessWidth >>= 1;
+ assert(AccessWidth >= 8 && "Cannot access under byte size!");
+ continue;
+ }
+
+ // Otherwise, add an access component.
+
+ // First, compute the bits inside this access which are part of the
+ // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
+ // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
+ // in the target that we are reading.
+ assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
+ assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
+ uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
+ uint64_t AccessBitsInFieldSize =
+ std::min(AccessWidth + AccessStart,
+ FieldOffset + FieldSize) - AccessBitsInFieldStart;
+
+ assert(NumComponents < 3 && "Unexpected number of components!");
+ CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
+ AI.FieldIndex = 0;
+ // FIXME: We still follow the old access pattern of only using the field
+ // byte offset. We should switch this once we fix the struct layout to be
+ // pretty.
+ AI.FieldByteOffset = AccessStart / 8;
+ AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
+ AI.AccessWidth = AccessWidth;
+ AI.AccessAlignment = llvm::MinAlign(ContainingTypeAlign, AccessStart) / 8;
+ AI.TargetBitOffset = AccessedTargetBits;
+ AI.TargetBitWidth = AccessBitsInFieldSize;
+
+ AccessStart += AccessWidth;
+ AccessedTargetBits += AI.TargetBitWidth;
+ }
+
+ assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
+ return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
+}
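+
+// A worked example (illustrative only): for
+//
+//   struct S { unsigned a : 3; unsigned b : 10; };
+//
+// laying out 'b' gives FieldOffset = 3 and FieldSize = 10 with a 32-bit
+// containing type, so a single component is produced: one aligned i32 access
+// with FieldBitStart = 3, TargetBitOffset = 0 and TargetBitWidth = 10.
+// Narrower components only appear when a full-width access would run past the
+// end of the record, e.g. with packed structs or -fno-bitfield-type-align.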
+
+CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
+ const FieldDecl *FD,
+ uint64_t FieldOffset,
+ uint64_t FieldSize) {
+ const RecordDecl *RD = FD->getParent();
+ const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
+ uint64_t ContainingTypeSizeInBits = RL.getSize();
+ unsigned ContainingTypeAlign = RL.getAlignment();
+
+ return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
+ ContainingTypeAlign);
+}
+
+void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
+ uint64_t FieldOffset) {
+ uint64_t FieldSize =
+ D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();
+
+ if (FieldSize == 0)
+ return;
+
+ uint64_t NextFieldOffset = NextFieldOffsetInBytes * 8;
+ unsigned NumBytesToAppend;
+
+ if (FieldOffset < NextFieldOffset) {
+ assert(BitsAvailableInLastField && "Bitfield size mismatch!");
+ assert(NextFieldOffsetInBytes && "Must have laid out at least one byte!");
+
+ // The bitfield begins in the previous bit-field.
+ NumBytesToAppend =
+ llvm::RoundUpToAlignment(FieldSize - BitsAvailableInLastField, 8) / 8;
+ } else {
+ assert(FieldOffset % 8 == 0 && "Field offset not aligned correctly");
+
+ // Append padding if necessary.
+ AppendBytes((FieldOffset - NextFieldOffset) / 8);
+
+ NumBytesToAppend =
+ llvm::RoundUpToAlignment(FieldSize, 8) / 8;
+
+ assert(NumBytesToAppend && "No bytes to append!");
+ }
+
+ // Add the bit field info.
+ LLVMBitFields.push_back(
+ LLVMBitFieldInfo(D, CGBitFieldInfo::MakeInfo(Types, D, FieldOffset,
+ FieldSize)));
+
+ AppendBytes(NumBytesToAppend);
+
+ BitsAvailableInLastField =
+ NextFieldOffsetInBytes * 8 - (FieldOffset + FieldSize);
+}
+
+bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
+ uint64_t FieldOffset) {
+ // If the field is packed, then we need a packed struct.
+ if (!Packed && D->hasAttr<PackedAttr>())
+ return false;
+
+ if (D->isBitField()) {
+ // We must use packed structs for unnamed bit fields since they
+ // don't affect the struct alignment.
+ if (!Packed && !D->getDeclName())
+ return false;
+
+ LayoutBitField(D, FieldOffset);
+ return true;
+ }
+
+ CheckZeroInitializable(D->getType());
+
+ assert(FieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!");
+ uint64_t FieldOffsetInBytes = FieldOffset / 8;
+
+ const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
+ unsigned TypeAlignment = getTypeAlignment(Ty);
+
+ // If the type alignment is larger than the struct alignment, we must use
+ // a packed struct.
+ if (TypeAlignment > Alignment) {
+ assert(!Packed && "Alignment is wrong even with packed struct!");
+ return false;
+ }
+
+ if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
+ const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
+ if (const MaxFieldAlignmentAttr *MFAA =
+ RD->getAttr<MaxFieldAlignmentAttr>()) {
+ if (MFAA->getAlignment() != TypeAlignment * 8 && !Packed)
+ return false;
+ }
+ }
+
+ // Round up the field offset to the alignment of the field type.
+ uint64_t AlignedNextFieldOffsetInBytes =
+ llvm::RoundUpToAlignment(NextFieldOffsetInBytes, TypeAlignment);
+
+ if (FieldOffsetInBytes < AlignedNextFieldOffsetInBytes) {
+ assert(!Packed && "Could not place field even with packed struct!");
+ return false;
+ }
+
+ if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
+ // Even with alignment, the field offset is not at the right place,
+ // insert padding.
+ uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;
+
+ AppendBytes(PaddingInBytes);
+ }
+
+ // Now append the field.
+ LLVMFields.push_back(LLVMFieldInfo(D, FieldTypes.size()));
+ AppendField(FieldOffsetInBytes, Ty);
+
+ return true;
+}
+
+const llvm::Type *
+CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
+ const ASTRecordLayout &Layout) {
+ if (Field->isBitField()) {
+ uint64_t FieldSize =
+ Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();
+
+ // Ignore zero sized bit fields.
+ if (FieldSize == 0)
+ return 0;
+
+ const llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
+ unsigned NumBytesToAppend =
+ llvm::RoundUpToAlignment(FieldSize, 8) / 8;
+
+ if (NumBytesToAppend > 1)
+ FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend);
+
+ // Add the bit field info.
+ LLVMBitFields.push_back(
+ LLVMBitFieldInfo(Field, CGBitFieldInfo::MakeInfo(Types, Field,
+ 0, FieldSize)));
+ return FieldTy;
+ }
+
+ // This is a regular union field.
+ LLVMFields.push_back(LLVMFieldInfo(Field, 0));
+ return Types.ConvertTypeForMemRecursive(Field->getType());
+}
+
+void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
+ assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");
+
+ const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);
+
+ const llvm::Type *Ty = 0;
+ uint64_t Size = 0;
+ unsigned Align = 0;
+
+ bool HasOnlyZeroSizedBitFields = true;
+
+ unsigned FieldNo = 0;
+ for (RecordDecl::field_iterator Field = D->field_begin(),
+ FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+ assert(Layout.getFieldOffset(FieldNo) == 0 &&
+ "Union field offset did not start at the beginning of record!");
+ const llvm::Type *FieldTy = LayoutUnionField(*Field, Layout);
+
+ if (!FieldTy)
+ continue;
+
+ HasOnlyZeroSizedBitFields = false;
+
+ unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy);
+ uint64_t FieldSize = Types.getTargetData().getTypeAllocSize(FieldTy);
+
+ if (FieldAlign < Align)
+ continue;
+
+ if (FieldAlign > Align || FieldSize > Size) {
+ Ty = FieldTy;
+ Align = FieldAlign;
+ Size = FieldSize;
+ }
+ }
+
+ // Now add our field.
+ if (Ty) {
+ AppendField(0, Ty);
+
+ if (getTypeAlignment(Ty) > Layout.getAlignment() / 8) {
+ // We need a packed struct.
+ Packed = true;
+ Align = 1;
+ }
+ }
+ if (!Align) {
+ assert(HasOnlyZeroSizedBitFields &&
+ "0-align record did not have all zero-sized bit-fields!");
+ Align = 1;
+ }
+
+ // Append tail padding.
+ if (Layout.getSize() / 8 > Size)
+ AppendPadding(Layout.getSize() / 8, Align);
+}
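+
+// As an illustration (a sketch of the resulting lowering): for
+//
+//   union U { char c; int i; };
+//
+// the int member has the greatest alignment and size, so the union is lowered
+// to an LLVM struct containing a single i32, and every non-bit-field member
+// maps to LLVM field number 0. Tail padding is only appended when the chosen
+// member is smaller than the union itself.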
+
+void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
+ uint64_t BaseOffset) {
+ const ASTRecordLayout &Layout =
+ Types.getContext().getASTRecordLayout(BaseDecl);
+
+ uint64_t NonVirtualSize = Layout.getNonVirtualSize();
+
+ if (BaseDecl->isEmpty()) {
+ // FIXME: Lay out empty bases.
+ return;
+ }
+
+ CheckZeroInitializable(BaseDecl);
+
+ // FIXME: Actually use a better type than [sizeof(BaseDecl) x i8] when we can.
+ AppendPadding(BaseOffset / 8, 1);
+
+ // Append the base field.
+ LLVMNonVirtualBases.push_back(LLVMBaseInfo(BaseDecl, FieldTypes.size()));
+
+ AppendBytes(NonVirtualSize / 8);
+}
+
+void
+CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout) {
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ // Check if we need to add a vtable pointer.
+ if (RD->isDynamicClass()) {
+ if (!PrimaryBase) {
+ const llvm::Type *FunctionType =
+ llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
+ /*isVarArg=*/true);
+ const llvm::Type *VTableTy = FunctionType->getPointerTo();
+
+ assert(NextFieldOffsetInBytes == 0 &&
+ "VTable pointer must come first!");
+ AppendField(NextFieldOffsetInBytes, VTableTy->getPointerTo());
+ } else {
+ // FIXME: Handle a virtual primary base.
+ if (!Layout.getPrimaryBaseWasVirtual())
+ LayoutNonVirtualBase(PrimaryBase, 0);
+ }
+ }
+
+ // Layout the non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // We've already laid out the primary base.
+ if (BaseDecl == PrimaryBase && !Layout.getPrimaryBaseWasVirtual())
+ continue;
+
+ LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl));
+ }
+}
+
+bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
+ assert(!D->isUnion() && "Can't call LayoutFields on a union!");
+ assert(Alignment && "Did not set alignment!");
+
+ const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);
+
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D))
+ LayoutNonVirtualBases(RD, Layout);
+
+ unsigned FieldNo = 0;
+
+ for (RecordDecl::field_iterator Field = D->field_begin(),
+ FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+ if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
+ assert(!Packed &&
+ "Could not layout fields even with a packed LLVM struct!");
+ return false;
+ }
+ }
+
+ // Append tail padding if necessary.
+ AppendTailPadding(Layout.getSize());
+
+ return true;
+}
+
+void CGRecordLayoutBuilder::AppendTailPadding(uint64_t RecordSize) {
+ assert(RecordSize % 8 == 0 && "Invalid record size!");
+
+ uint64_t RecordSizeInBytes = RecordSize / 8;
+ assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");
+
+ uint64_t AlignedNextFieldOffset =
+ llvm::RoundUpToAlignment(NextFieldOffsetInBytes, AlignmentAsLLVMStruct);
+
+ if (AlignedNextFieldOffset == RecordSizeInBytes) {
+ // We don't need any padding.
+ return;
+ }
+
+ unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
+ AppendBytes(NumPadBytes);
+}
+
+void CGRecordLayoutBuilder::AppendField(uint64_t FieldOffsetInBytes,
+ const llvm::Type *FieldTy) {
+ AlignmentAsLLVMStruct = std::max(AlignmentAsLLVMStruct,
+ getTypeAlignment(FieldTy));
+
+ uint64_t FieldSizeInBytes = Types.getTargetData().getTypeAllocSize(FieldTy);
+
+ FieldTypes.push_back(FieldTy);
+
+ NextFieldOffsetInBytes = FieldOffsetInBytes + FieldSizeInBytes;
+ BitsAvailableInLastField = 0;
+}
+
+void CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
+ unsigned FieldAlignment) {
+ assert(NextFieldOffsetInBytes <= FieldOffsetInBytes &&
+ "Incorrect field layout!");
+
+ // Round up the field offset to the alignment of the field type.
+ uint64_t AlignedNextFieldOffsetInBytes =
+ llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);
+
+ if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
+ // Even with alignment, the field offset is not at the right place,
+ // insert padding.
+ uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;
+
+ AppendBytes(PaddingInBytes);
+ }
+}
+
+void CGRecordLayoutBuilder::AppendBytes(uint64_t NumBytes) {
+ if (NumBytes == 0)
+ return;
+
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
+ if (NumBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumBytes);
+
+ // Append the padding field
+ AppendField(NextFieldOffsetInBytes, Ty);
+}
+
+unsigned CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
+ if (Packed)
+ return 1;
+
+ return Types.getTargetData().getABITypeAlignment(Ty);
+}
+
+void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
+ // This record already contains a member pointer.
+ if (!IsZeroInitializable)
+ return;
+
+ // Can only have member pointers if we're compiling C++.
+ if (!Types.getContext().getLangOptions().CPlusPlus)
+ return;
+
+ T = Types.getContext().getBaseElementType(T);
+
+ if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
+ if (!Types.getCXXABI().isZeroInitializable(MPT))
+ IsZeroInitializable = false;
+ } else if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ CheckZeroInitializable(RD);
+ }
+}
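+
+// Background for the check above: under the Itanium C++ ABI a null pointer to
+// data member is represented as -1 rather than 0, so for example
+//
+//   struct A { int x; };
+//   struct B { int A::*p; };   // "B b = {};" must set p to -1, not all zeros
+//
+// a record like B (or any record containing one) cannot be emitted with a
+// plain LLVM zeroinitializer, which is what IsZeroInitializable tracks.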
+
+void CGRecordLayoutBuilder::CheckZeroInitializable(const CXXRecordDecl *RD) {
+ // This record already contains a member pointer.
+ if (!IsZeroInitializable)
+ return;
+
+ // FIXME: It would be better if there was a way to explicitly compute the
+ // record layout instead of converting to a type.
+ Types.ConvertTagDeclType(RD);
+
+ const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
+
+ if (!Layout.isZeroInitializable())
+ IsZeroInitializable = false;
+}
+
+CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
+ CGRecordLayoutBuilder Builder(*this);
+
+ Builder.Layout(D);
+
+ const llvm::Type *Ty = llvm::StructType::get(getLLVMContext(),
+ Builder.FieldTypes,
+ Builder.Packed);
+
+ CGRecordLayout *RL =
+ new CGRecordLayout(Ty, Builder.IsZeroInitializable);
+
+ // Add all the non-virtual base field numbers.
+ RL->NonVirtualBaseFields.insert(Builder.LLVMNonVirtualBases.begin(),
+ Builder.LLVMNonVirtualBases.end());
+
+ // Add all the field numbers.
+ RL->FieldInfo.insert(Builder.LLVMFields.begin(),
+ Builder.LLVMFields.end());
+
+ // Add bitfield info.
+ RL->BitFields.insert(Builder.LLVMBitFields.begin(),
+ Builder.LLVMBitFields.end());
+
+ // Dump the layout, if requested.
+ if (getContext().getLangOptions().DumpRecordLayouts) {
+ llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
+ llvm::errs() << "Record: ";
+ D->dump();
+ llvm::errs() << "\nLayout: ";
+ RL->dump();
+ }
+
+#ifndef NDEBUG
+ // Verify that the computed LLVM struct size matches the AST layout size.
+ uint64_t TypeSizeInBits = getContext().getASTRecordLayout(D).getSize();
+ assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
+ "Type size mismatch!");
+
+ // Verify that the LLVM and AST field offsets agree.
+ const llvm::StructType *ST =
+ dyn_cast<llvm::StructType>(RL->getLLVMType());
+ const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);
+
+ const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
+ RecordDecl::field_iterator it = D->field_begin();
+ for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
+ const FieldDecl *FD = *it;
+
+ // For non-bit-fields, just check that the LLVM struct offset matches the
+ // AST offset.
+ if (!FD->isBitField()) {
+ unsigned FieldNo = RL->getLLVMFieldNo(FD);
+ assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
+ "Invalid field offset!");
+ continue;
+ }
+
+ // Ignore unnamed bit-fields.
+ if (!FD->getDeclName())
+ continue;
+
+ const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
+ for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
+ const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
+
+ // Verify that every component access is within the structure.
+ uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
+ uint64_t AccessBitOffset = FieldOffset + AI.FieldByteOffset * 8;
+ assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
+ "Invalid bit-field access (out of range)!");
+ }
+ }
+#endif
+
+ return RL;
+}
+
+void CGRecordLayout::print(llvm::raw_ostream &OS) const {
+ OS << "<CGRecordLayout\n";
+ OS << " LLVMType:" << *LLVMType << "\n";
+ OS << " IsZeroInitializable:" << IsZeroInitializable << "\n";
+ OS << " BitFields:[\n";
+
+ // Print bit-field infos in declaration order.
+ std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
+ for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
+ it = BitFields.begin(), ie = BitFields.end();
+ it != ie; ++it) {
+ const RecordDecl *RD = it->first->getParent();
+ unsigned Index = 0;
+ for (RecordDecl::field_iterator
+ it2 = RD->field_begin(); *it2 != it->first; ++it2)
+ ++Index;
+ BFIs.push_back(std::make_pair(Index, &it->second));
+ }
+ llvm::array_pod_sort(BFIs.begin(), BFIs.end());
+ for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
+ OS.indent(4);
+ BFIs[i].second->print(OS);
+ OS << "\n";
+ }
+
+ OS << "]>\n";
+}
+
+void CGRecordLayout::dump() const {
+ print(llvm::errs());
+}
+
+void CGBitFieldInfo::print(llvm::raw_ostream &OS) const {
+ OS << "<CGBitFieldInfo";
+ OS << " Size:" << Size;
+ OS << " IsSigned:" << IsSigned << "\n";
+
+ OS.indent(4 + strlen("<CGBitFieldInfo"));
+ OS << " NumComponents:" << getNumComponents();
+ OS << " Components: [";
+ if (getNumComponents()) {
+ OS << "\n";
+ for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
+ const AccessInfo &AI = getComponent(i);
+ OS.indent(8);
+ OS << "<AccessInfo"
+ << " FieldIndex:" << AI.FieldIndex
+ << " FieldByteOffset:" << AI.FieldByteOffset
+ << " FieldBitStart:" << AI.FieldBitStart
+ << " AccessWidth:" << AI.AccessWidth << "\n";
+ OS.indent(8 + strlen("<AccessInfo"));
+ OS << " AccessAlignment:" << AI.AccessAlignment
+ << " TargetBitOffset:" << AI.TargetBitOffset
+ << " TargetBitWidth:" << AI.TargetBitWidth
+ << ">\n";
+ }
+ OS.indent(4);
+ }
+ OS << "]>";
+}
+
+void CGBitFieldInfo::dump() const {
+ print(llvm::errs());
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
new file mode 100644
index 0000000..16145f7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
@@ -0,0 +1,1211 @@
+//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Stmt nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/InlineAsm.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Statement Emission
+//===----------------------------------------------------------------------===//
+
+void CodeGenFunction::EmitStopPoint(const Stmt *S) {
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ if (isa<DeclStmt>(S))
+ DI->setLocation(S->getLocEnd());
+ else
+ DI->setLocation(S->getLocStart());
+ DI->UpdateLineDirectiveRegion(Builder);
+ DI->EmitStopPoint(Builder);
+ }
+}
+
+void CodeGenFunction::EmitStmt(const Stmt *S) {
+ assert(S && "Null statement?");
+
+ // Check if we can handle this without bothering to generate an
+ // insert point or debug info.
+ if (EmitSimpleStmt(S))
+ return;
+
+ // Check if we are generating unreachable code.
+ if (!HaveInsertPoint()) {
+ // If so, and the statement doesn't contain a label, then we do not need to
+ // generate actual code. This is safe because (1) the current point is
+ // unreachable, so we don't need to execute the code, and (2) we've already
+ // handled the statements which update internal data structures (like the
+ // local variable map) which could be used by subsequent statements.
+ if (!ContainsLabel(S)) {
+ // Verify that any decl statements were handled as simple, they may be in
+ // scope of subsequent reachable statements.
+ assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
+ return;
+ }
+
+ // Otherwise, make a new block to hold the code.
+ EnsureInsertPoint();
+ }
+
+ // Generate a stoppoint if we are emitting debug info.
+ EmitStopPoint(S);
+
+ switch (S->getStmtClass()) {
+ default:
+ // Must be an expression in a stmt context. Emit the value (to get
+ // side-effects) and ignore the result.
+ if (!isa<Expr>(S))
+ ErrorUnsupported(S, "statement");
+
+ EmitAnyExpr(cast<Expr>(S), 0, false, true);
+
+ // Expression emitters don't handle unreachable blocks yet, so look for one
+ // explicitly here. This handles the common case of a call to a noreturn
+ // function.
+ if (llvm::BasicBlock *CurBB = Builder.GetInsertBlock()) {
+ if (CurBB->empty() && CurBB->use_empty()) {
+ CurBB->eraseFromParent();
+ Builder.ClearInsertionPoint();
+ }
+ }
+ break;
+ case Stmt::IndirectGotoStmtClass:
+ EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
+
+ case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
+ case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S)); break;
+ case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S)); break;
+ case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S)); break;
+
+ case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
+
+ case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
+ case Stmt::AsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
+
+ case Stmt::ObjCAtTryStmtClass:
+ EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
+ break;
+ case Stmt::ObjCAtCatchStmtClass:
+ assert(0 && "@catch statements should be handled by EmitObjCAtTryStmt");
+ break;
+ case Stmt::ObjCAtFinallyStmtClass:
+ assert(0 && "@finally statements should be handled by EmitObjCAtTryStmt");
+ break;
+ case Stmt::ObjCAtThrowStmtClass:
+ EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
+ break;
+ case Stmt::ObjCAtSynchronizedStmtClass:
+ EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
+ break;
+ case Stmt::ObjCForCollectionStmtClass:
+ EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
+ break;
+
+ case Stmt::CXXTryStmtClass:
+ EmitCXXTryStmt(cast<CXXTryStmt>(*S));
+ break;
+ }
+}
+
+bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
+ switch (S->getStmtClass()) {
+ default: return false;
+ case Stmt::NullStmtClass: break;
+ case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
+ case Stmt::DeclStmtClass: EmitDeclStmt(cast<DeclStmt>(*S)); break;
+ case Stmt::LabelStmtClass: EmitLabelStmt(cast<LabelStmt>(*S)); break;
+ case Stmt::GotoStmtClass: EmitGotoStmt(cast<GotoStmt>(*S)); break;
+ case Stmt::BreakStmtClass: EmitBreakStmt(cast<BreakStmt>(*S)); break;
+ case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
+ case Stmt::DefaultStmtClass: EmitDefaultStmt(cast<DefaultStmt>(*S)); break;
+ case Stmt::CaseStmtClass: EmitCaseStmt(cast<CaseStmt>(*S)); break;
+ }
+
+ return true;
+}
+
+/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
+/// this captures the expression result of the last sub-statement and returns it
+/// (for use by the statement expression extension).
+RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
+ llvm::Value *AggLoc, bool isAggVol) {
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
+ "LLVM IR generation of compound statement ('{}')");
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI) {
+ DI->setLocation(S.getLBracLoc());
+ DI->EmitRegionStart(Builder);
+ }
+
+ // Keep track of the current cleanup stack depth.
+ RunCleanupsScope Scope(*this);
+
+ for (CompoundStmt::const_body_iterator I = S.body_begin(),
+ E = S.body_end()-GetLast; I != E; ++I)
+ EmitStmt(*I);
+
+ if (DI) {
+ DI->setLocation(S.getRBracLoc());
+ DI->EmitRegionEnd(Builder);
+ }
+
+ RValue RV;
+ if (!GetLast)
+ RV = RValue::get(0);
+ else {
+ // We have to special case labels here. They are statements, but when put
+ // at the end of a statement expression, they yield the value of their
+ // subexpression. Handle this by walking through all labels we encounter,
+ // emitting them before we evaluate the subexpr.
+ const Stmt *LastStmt = S.body_back();
+ while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
+ EmitLabel(*LS);
+ LastStmt = LS->getSubStmt();
+ }
+
+ EnsureInsertPoint();
+
+ RV = EmitAnyExpr(cast<Expr>(LastStmt), AggLoc);
+ }
+
+ return RV;
+}
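+
+// Usage note (illustrative): the GetLast path serves the GNU statement
+// expression extension, e.g.
+//
+//   int y = ({ int x = f(); x + 1; });
+//
+// where the value of the last sub-statement, 'x + 1', becomes the value of the
+// whole expression; any labels in front of it are emitted first, as above.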
+
+void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
+ llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
+
+ // If there is a cleanup stack, then it isn't worth trying to
+ // simplify this block (we would need to remove it from the scope map
+ // and cleanup entry).
+ if (!EHStack.empty())
+ return;
+
+ // Can only simplify direct branches.
+ if (!BI || !BI->isUnconditional())
+ return;
+
+ BB->replaceAllUsesWith(BI->getSuccessor(0));
+ BI->eraseFromParent();
+ BB->eraseFromParent();
+}
+
+void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ // Fall out of the current block (if necessary).
+ EmitBranch(BB);
+
+ if (IsFinished && BB->use_empty()) {
+ delete BB;
+ return;
+ }
+
+ // Place the block after the current block, if possible, or else at
+ // the end of the function.
+ if (CurBB && CurBB->getParent())
+ CurFn->getBasicBlockList().insertAfter(CurBB, BB);
+ else
+ CurFn->getBasicBlockList().push_back(BB);
+ Builder.SetInsertPoint(BB);
+}
+
+void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
+ // Emit a branch from the current block to the target one if this
+ // was a real block. If this was just a fall-through block after a
+ // terminator, don't emit it.
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ if (!CurBB || CurBB->getTerminator()) {
+ // If there is no insert point or the previous block is already
+ // terminated, don't touch it.
+ } else {
+ // Otherwise, create a fall-through branch.
+ Builder.CreateBr(Target);
+ }
+
+ Builder.ClearInsertionPoint();
+}
+
+CodeGenFunction::JumpDest
+CodeGenFunction::getJumpDestForLabel(const LabelStmt *S) {
+ JumpDest &Dest = LabelMap[S];
+ if (Dest.isValid()) return Dest;
+
+ // Create, but don't insert, the new block.
+ Dest = JumpDest(createBasicBlock(S->getName()),
+ EHScopeStack::stable_iterator::invalid(),
+ NextCleanupDestIndex++);
+ return Dest;
+}
+
+void CodeGenFunction::EmitLabel(const LabelStmt &S) {
+ JumpDest &Dest = LabelMap[&S];
+
+ // If we didn't need a forward reference to this label, just go
+ // ahead and create a destination at the current scope.
+ if (!Dest.isValid()) {
+ Dest = getJumpDestInCurrentScope(S.getName());
+
+ // Otherwise, we need to give this label a target depth and remove
+ // it from the branch-fixups list.
+ } else {
+ assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
+ Dest = JumpDest(Dest.getBlock(),
+ EHStack.stable_begin(),
+ Dest.getDestIndex());
+
+ ResolveBranchFixups(Dest.getBlock());
+ }
+
+ EmitBlock(Dest.getBlock());
+}
+
+
+void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
+ EmitLabel(S);
+ EmitStmt(S.getSubStmt());
+}
+
+void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
+}
+
+
+void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
+ // Ensure that we have an i8* for our PHI node.
+ llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
+ llvm::Type::getInt8PtrTy(VMContext),
+ "addr");
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+
+ // Get the basic block for the indirect goto.
+ llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
+
+ // The first instruction in the block has to be the PHI for the switch dest,
+ // add an entry for this branch.
+ cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
+
+ EmitBranch(IndGotoBB);
+}
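+
+// For illustration, this handles GNU computed gotos such as
+//
+//   void *targets[] = { &&L1, &&L2 };
+//   goto *targets[i];
+//
+// where each 'goto *expr' adds an incoming edge to the PHI node in the shared
+// indirect-goto block returned by GetIndirectGotoBlock().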
+
+void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
+ // C99 6.8.4.1: The first substatement is executed if the expression compares
+ // unequal to 0. The condition must be a scalar type.
+ RunCleanupsScope ConditionScope(*this);
+
+ if (S.getConditionVariable())
+ EmitLocalBlockVarDecl(*S.getConditionVariable());
+
+ // If the condition constant folds and can be elided, try to avoid emitting
+ // the condition and the dead arm of the if/else.
+ if (int Cond = ConstantFoldsToSimpleInteger(S.getCond())) {
+ // Figure out which block (then or else) is executed.
+ const Stmt *Executed = S.getThen(), *Skipped = S.getElse();
+ if (Cond == -1) // Condition false?
+ std::swap(Executed, Skipped);
+
+ // If the skipped block has no labels in it, just emit the executed block.
+ // This avoids emitting dead code and simplifies the CFG substantially.
+ if (!ContainsLabel(Skipped)) {
+ if (Executed) {
+ RunCleanupsScope ExecutedScope(*this);
+ EmitStmt(Executed);
+ }
+ return;
+ }
+ }
+
+ // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
+ // the conditional branch.
+ llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
+ llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
+ llvm::BasicBlock *ElseBlock = ContBlock;
+ if (S.getElse())
+ ElseBlock = createBasicBlock("if.else");
+ EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock);
+
+ // Emit the 'then' code.
+ EmitBlock(ThenBlock);
+ {
+ RunCleanupsScope ThenScope(*this);
+ EmitStmt(S.getThen());
+ }
+ EmitBranch(ContBlock);
+
+ // Emit the 'else' code if present.
+ if (const Stmt *Else = S.getElse()) {
+ EmitBlock(ElseBlock);
+ {
+ RunCleanupsScope ElseScope(*this);
+ EmitStmt(Else);
+ }
+ EmitBranch(ContBlock);
+ }
+
+ // Emit the continuation block for code after the if.
+ EmitBlock(ContBlock, true);
+}
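+
+// Example of the constant-folding path above (illustrative): for
+//
+//   if (0) { x = 1; }
+//
+// no code is emitted at all, whereas
+//
+//   if (0) { L: x = 1; }
+//
+// still emits the skipped arm, because a 'goto L' elsewhere could reach it;
+// that is exactly what the ContainsLabel check guards against.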
+
+void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
+ // Emit the header for the loop, which will also become
+ // the continue target.
+ JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
+ EmitBlock(LoopHeader.getBlock());
+
+ // Create an exit block for when the condition fails, which will
+ // also become the break target.
+ JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));
+
+ // C++ [stmt.while]p2:
+ // When the condition of a while statement is a declaration, the
+ // scope of the variable that is declared extends from its point
+ // of declaration (3.3.2) to the end of the while statement.
+ // [...]
+ // The object created in a condition is destroyed and created
+ // with each iteration of the loop.
+ RunCleanupsScope ConditionScope(*this);
+
+ if (S.getConditionVariable())
+ EmitLocalBlockVarDecl(*S.getConditionVariable());
+
+ // Evaluate the conditional in the while header. C99 6.8.5.1: The
+ // evaluation of the controlling expression takes place before each
+ // execution of the loop body.
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+
+ // while(1) is common, avoid extra exit blocks. Be sure
+ // to correctly handle break/continue though.
+ bool EmitBoolCondBranch = true;
+ if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
+ if (C->isOne())
+ EmitBoolCondBranch = false;
+
+ // As long as the condition is true, go to the loop body.
+ llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
+ if (EmitBoolCondBranch) {
+ llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
+ if (ConditionScope.requiresCleanups())
+ ExitBlock = createBasicBlock("while.exit");
+
+ Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
+
+ if (ExitBlock != LoopExit.getBlock()) {
+ EmitBlock(ExitBlock);
+ EmitBranchThroughCleanup(LoopExit);
+ }
+ }
+
+ // Emit the loop body. We have to emit this in a cleanup scope
+ // because it might be a singleton DeclStmt.
+ {
+ RunCleanupsScope BodyScope(*this);
+ EmitBlock(LoopBody);
+ EmitStmt(S.getBody());
+ }
+
+ BreakContinueStack.pop_back();
+
+ // Immediately force cleanup.
+ ConditionScope.ForceCleanup();
+
+ // Branch to the loop header again.
+ EmitBranch(LoopHeader.getBlock());
+
+ // Emit the exit block.
+ EmitBlock(LoopExit.getBlock(), true);
+
+ // If we skipped emitting the conditional branch, the LoopHeader block is
+ // typically just a fall-through branch; try to erase it.
+ if (!EmitBoolCondBranch)
+ SimplifyForwardingBlocks(LoopHeader.getBlock());
+}
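+
+// The condition-variable handling above covers C++ code such as
+//
+//   while (int c = next()) use(c);
+//
+// where, per the standard text quoted in the function, 'c' is created and
+// destroyed on every iteration; ConditionScope provides that per-iteration
+// cleanup.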
+
+void CodeGenFunction::EmitDoStmt(const DoStmt &S) {
+ JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
+ JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));
+
+ // Emit the body of the loop.
+ llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
+ EmitBlock(LoopBody);
+ {
+ RunCleanupsScope BodyScope(*this);
+ EmitStmt(S.getBody());
+ }
+
+ BreakContinueStack.pop_back();
+
+ EmitBlock(LoopCond.getBlock());
+
+ // C99 6.8.5.2: "The evaluation of the controlling expression takes place
+ // after each execution of the loop body."
+
+ // Evaluate the conditional in the while header.
+ // C99 6.8.5p2/p4: The first substatement is executed if the expression
+ // compares unequal to 0. The condition must be a scalar type.
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+
+ // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
+ // to correctly handle break/continue though.
+ bool EmitBoolCondBranch = true;
+ if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
+ if (C->isZero())
+ EmitBoolCondBranch = false;
+
+ // As long as the condition is true, iterate the loop.
+ if (EmitBoolCondBranch)
+ Builder.CreateCondBr(BoolCondVal, LoopBody, LoopExit.getBlock());
+
+ // Emit the exit block.
+ EmitBlock(LoopExit.getBlock());
+
+ // If we skipped emitting the conditional branch, the DoCond block is
+ // typically just a fall-through branch; try to erase it.
+ if (!EmitBoolCondBranch)
+ SimplifyForwardingBlocks(LoopCond.getBlock());
+}
+
+void CodeGenFunction::EmitForStmt(const ForStmt &S) {
+ JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
+
+ RunCleanupsScope ForScope(*this);
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI) {
+ DI->setLocation(S.getSourceRange().getBegin());
+ DI->EmitRegionStart(Builder);
+ }
+
+ // Evaluate the first part before the loop.
+ if (S.getInit())
+ EmitStmt(S.getInit());
+
+ // Start the loop with a block that tests the condition.
+ // If there's an increment, the continue scope will be overwritten
+ // later.
+ JumpDest Continue = getJumpDestInCurrentScope("for.cond");
+ llvm::BasicBlock *CondBlock = Continue.getBlock();
+ EmitBlock(CondBlock);
+
+ // Create a cleanup scope for the condition variable cleanups.
+ RunCleanupsScope ConditionScope(*this);
+
+ llvm::Value *BoolCondVal = 0;
+ if (S.getCond()) {
+ // If the for statement has a condition scope, emit the local variable
+ // declaration.
+ llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
+ if (S.getConditionVariable()) {
+ EmitLocalBlockVarDecl(*S.getConditionVariable());
+ }
+
+ // If there are any cleanups between here and the loop-exit scope,
+ // create a block to stage a loop exit along.
+ if (ForScope.requiresCleanups())
+ ExitBlock = createBasicBlock("for.cond.cleanup");
+
+ // As long as the condition is true, iterate the loop.
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+ // C99 6.8.5p2/p4: The first substatement is executed if the expression
+ // compares unequal to 0. The condition must be a scalar type.
+ BoolCondVal = EvaluateExprAsBool(S.getCond());
+ Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock);
+
+ if (ExitBlock != LoopExit.getBlock()) {
+ EmitBlock(ExitBlock);
+ EmitBranchThroughCleanup(LoopExit);
+ }
+
+ EmitBlock(ForBody);
+ } else {
+ // Treat it as a non-zero constant. Don't even create a new block for the
+ // body, just fall into it.
+ }
+
+ // If the for loop doesn't have an increment we can just use the
+ // condition as the continue block. Otherwise we'll need to create
+ // a block for it (in the current scope, i.e. in the scope of the
+ // condition), and that block will become our continue block.
+ if (S.getInc())
+ Continue = getJumpDestInCurrentScope("for.inc");
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
+
+ {
+ // Create a separate cleanup scope for the body, in case it is not
+ // a compound statement.
+ RunCleanupsScope BodyScope(*this);
+ EmitStmt(S.getBody());
+ }
+
+ // If there is an increment, emit it next.
+ if (S.getInc()) {
+ EmitBlock(Continue.getBlock());
+ EmitStmt(S.getInc());
+ }
+
+ BreakContinueStack.pop_back();
+
+ ConditionScope.ForceCleanup();
+ EmitBranch(CondBlock);
+
+ ForScope.ForceCleanup();
+
+ if (DI) {
+ DI->setLocation(S.getSourceRange().getEnd());
+ DI->EmitRegionEnd(Builder);
+ }
+
+ // Emit the fall-through block.
+ EmitBlock(LoopExit.getBlock(), true);
+}
+
+void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
+ if (RV.isScalar()) {
+ Builder.CreateStore(RV.getScalarVal(), ReturnValue);
+ } else if (RV.isAggregate()) {
+ EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);
+ } else {
+ StoreComplexToAddr(RV.getComplexVal(), ReturnValue, false);
+ }
+ EmitBranchThroughCleanup(ReturnBlock);
+}
+
+/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
+/// if the function returns void, or may be missing one if the function returns
+/// non-void. Fun stuff :).
+void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
+ // Emit the result value, even if unused, to evaluate the side effects.
+ const Expr *RV = S.getRetValue();
+
+ // FIXME: Clean this up by using an LValue for ReturnTemp,
+ // EmitStoreThroughLValue, and EmitAnyExpr.
+ if (S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable() &&
+ !Target.useGlobalsForAutomaticVariables()) {
+ // Apply the named return value optimization for this return statement,
+ // which means doing nothing: the appropriate result has already been
+ // constructed into the NRVO variable.
+
+ // If there is an NRVO flag for this variable, set it to 1 to indicate
+ // that the cleanup code should not destroy the variable.
+ if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()]) {
+ const llvm::Type *BoolTy = llvm::Type::getInt1Ty(VMContext);
+ llvm::Value *One = llvm::ConstantInt::get(BoolTy, 1);
+ Builder.CreateStore(One, NRVOFlag);
+ }
+ } else if (!ReturnValue) {
+ // Make sure not to return anything, but evaluate the expression
+ // for side effects.
+ if (RV)
+ EmitAnyExpr(RV);
+ } else if (RV == 0) {
+ // Do nothing (return value is left uninitialized)
+ } else if (FnRetTy->isReferenceType()) {
+ // If this function returns a reference, take the address of the expression
+ // rather than the value.
+ RValue Result = EmitReferenceBindingToExpr(RV, /*InitializedDecl=*/0);
+ Builder.CreateStore(Result.getScalarVal(), ReturnValue);
+ } else if (!hasAggregateLLVMType(RV->getType())) {
+ Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
+ } else if (RV->getType()->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(RV, ReturnValue, false);
+ } else {
+ EmitAggExpr(RV, ReturnValue, false);
+ }
+
+ EmitBranchThroughCleanup(ReturnBlock);
+}
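+
+// Illustrative NRVO case for the return path above (hypothetical names): in
+//   X f() { X x; /* ... */ return x; }
+// 'x' is constructed directly in the return slot, so the return statement
+// emits no copy; if an NRVO flag was created for the variable, the return
+// simply stores true to it so the cleanup path skips x's destructor.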
+
+void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
+ // As long as debug info is modeled with instructions, we have to ensure we
+ // have a place to insert here and write the stop point here.
+ if (getDebugInfo()) {
+ EnsureInsertPoint();
+ EmitStopPoint(&S);
+ }
+
+ for (DeclStmt::const_decl_iterator I = S.decl_begin(), E = S.decl_end();
+ I != E; ++I)
+ EmitDecl(**I);
+}
+
+void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
+ assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
+
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ JumpDest Block = BreakContinueStack.back().BreakBlock;
+ EmitBranchThroughCleanup(Block);
+}
+
+void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
+ assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
+
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ JumpDest Block = BreakContinueStack.back().ContinueBlock;
+ EmitBranchThroughCleanup(Block);
+}
+
+/// EmitCaseStmtRange - If the case statement range is not too big, add
+/// multiple cases to the switch instruction, one for each value within
+/// the range. If the range is too big, emit an "if" condition check instead.
+void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
+ assert(S.getRHS() && "Expected RHS value in CaseStmt");
+
+ llvm::APSInt LHS = S.getLHS()->EvaluateAsInt(getContext());
+ llvm::APSInt RHS = S.getRHS()->EvaluateAsInt(getContext());
+
+ // Emit the code for this case. We do this first to make sure it is
+ // properly chained from our predecessor before generating the
+ // switch machinery to enter this block.
+ EmitBlock(createBasicBlock("sw.bb"));
+ llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
+ EmitStmt(S.getSubStmt());
+
+ // If range is empty, do nothing.
+ if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
+ return;
+
+ llvm::APInt Range = RHS - LHS;
+ // FIXME: parameters such as this should not be hardcoded.
+ if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
+ // Range is small enough to add multiple switch instruction cases.
+ for (unsigned i = 0, e = Range.getZExtValue() + 1; i != e; ++i) {
+ SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, LHS), CaseDest);
+ LHS++;
+ }
+ return;
+ }
+
+ // The range is too big. Emit "if" condition into a new block,
+ // making sure to save and restore the current insertion point.
+ llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
+
+ // Push this test onto the chain of range checks (which terminates
+ // in the default basic block). The switch's default will be changed
+ // to the top of this chain after switch emission is complete.
+ llvm::BasicBlock *FalseDest = CaseRangeBlock;
+ CaseRangeBlock = createBasicBlock("sw.caserange");
+
+ CurFn->getBasicBlockList().push_back(CaseRangeBlock);
+ Builder.SetInsertPoint(CaseRangeBlock);
+
+ // Emit range check.
+ llvm::Value *Diff =
+ Builder.CreateSub(SwitchInsn->getCondition(),
+ llvm::ConstantInt::get(VMContext, LHS), "tmp");
+ llvm::Value *Cond =
+ Builder.CreateICmpULE(Diff,
+ llvm::ConstantInt::get(VMContext, Range), "tmp");
+ Builder.CreateCondBr(Cond, CaseDest, FalseDest);
+
+ // Restore the appropriate insertion point.
+ if (RestoreBB)
+ Builder.SetInsertPoint(RestoreBB);
+ else
+ Builder.ClearInsertionPoint();
+}
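+
+// Worked example of the range check above (illustrative values): for
+// "case 10 ... 200:" the emitted test is equivalent to
+//   if ((unsigned)(Cond - 10) <= 190) goto sw.bb;
+// A single unsigned subtract-and-compare covers the whole range: when
+// Cond < 10 the subtraction wraps to a large unsigned value, which fails the
+// ULE test and falls through to the next range check or the default.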
+
+void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
+ if (S.getRHS()) {
+ EmitCaseStmtRange(S);
+ return;
+ }
+
+ EmitBlock(createBasicBlock("sw.bb"));
+ llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
+ llvm::APSInt CaseVal = S.getLHS()->EvaluateAsInt(getContext());
+ SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, CaseVal), CaseDest);
+
+ // Recursively emitting the statement is acceptable, but is not wonderful for
+ // code where we have many case statements nested together, i.e.:
+ // case 1:
+ // case 2:
+ // case 3: etc.
+ // Handling this recursively will create a new block for each case statement
+ // that falls through to the next case which is IR intensive. It also causes
+ // deep recursion which can run into stack depth limitations. Handle
+ // sequential non-range case statements specially.
+ const CaseStmt *CurCase = &S;
+ const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
+
+ // Otherwise, iteratively add consecutive cases to this switch stmt.
+ while (NextCase && NextCase->getRHS() == 0) {
+ CurCase = NextCase;
+ CaseVal = CurCase->getLHS()->EvaluateAsInt(getContext());
+ SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, CaseVal), CaseDest);
+
+ NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
+ }
+
+ // Normal default recursion for non-cases.
+ EmitStmt(CurCase->getSubStmt());
+}
+
+void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
+ llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
+ assert(DefaultBlock->empty() &&
+ "EmitDefaultStmt: Default block already defined?");
+ EmitBlock(DefaultBlock);
+ EmitStmt(S.getSubStmt());
+}
+
+void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
+ JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
+
+ RunCleanupsScope ConditionScope(*this);
+
+ if (S.getConditionVariable())
+ EmitLocalBlockVarDecl(*S.getConditionVariable());
+
+ llvm::Value *CondV = EmitScalarExpr(S.getCond());
+
+ // Handle nested switch statements.
+ llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
+ llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
+
+ // Create basic block to hold stuff that comes after switch
+ // statement. We also need to create a default block now so that
+ // explicit case range tests can have a place to jump to on
+ // failure.
+ llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
+ SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
+ CaseRangeBlock = DefaultBlock;
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ Builder.ClearInsertionPoint();
+
+ // All break statements jump to SwitchExit. If BreakContinueStack is
+ // non-empty, reuse the enclosing construct's continue block.
+ JumpDest OuterContinue;
+ if (!BreakContinueStack.empty())
+ OuterContinue = BreakContinueStack.back().ContinueBlock;
+
+ BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
+
+ // Emit switch body.
+ EmitStmt(S.getBody());
+
+ BreakContinueStack.pop_back();
+
+ // Update the default block in case explicit case range tests have
+ // been chained on top.
+ SwitchInsn->setSuccessor(0, CaseRangeBlock);
+
+ // If a default was never emitted:
+ if (!DefaultBlock->getParent()) {
+ // If we have cleanups, emit the default block so that there's a
+ // place to jump through the cleanups from.
+ if (ConditionScope.requiresCleanups()) {
+ EmitBlock(DefaultBlock);
+
+ // Otherwise, just forward the default block to the switch end.
+ } else {
+ DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
+ delete DefaultBlock;
+ }
+ }
+
+ ConditionScope.ForceCleanup();
+
+ // Emit continuation.
+ EmitBlock(SwitchExit.getBlock(), true);
+
+ SwitchInsn = SavedSwitchInsn;
+ CaseRangeBlock = SavedCRBlock;
+}
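+
+// Control-flow sketch for the range-check chain (illustrative): the switch
+// initially branches to sw.default; each EmitCaseStmtRange above pushes a new
+// sw.caserange block whose false edge targets the previous head of the chain,
+// and setSuccessor(0, CaseRangeBlock) then redirects the switch's default edge
+// into the newest check. A value matching no explicit case therefore walks the
+// chain of range tests before reaching the real default (or the epilog).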
+
+static std::string
+SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
+ llvm::SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=0) {
+ std::string Result;
+ std::string tmp;
+
+ while (*Constraint) {
+ switch (*Constraint) {
+ default:
+ tmp = Target.convertConstraint(*Constraint);
+ if (Result.find(tmp) == std::string::npos) // Combine unique constraints
+ Result += tmp;
+ break;
+ // Ignore these
+ case '*':
+ case '?':
+ case '!':
+ case '=': // Will see this and the following in multi-alternative constraints.
+ case '+':
+ break;
+ case ',': // FIXME - Until the back-end properly supports
+ return Result; // multiple alternative constraints, we stop here.
+ break;
+ case 'g':
+ Result += "imr";
+ break;
+ case '[': {
+ assert(OutCons &&
+ "Must pass output names to constraints with a symbolic name");
+ unsigned Index;
+ bool result = Target.resolveSymbolicName(Constraint,
+ &(*OutCons)[0],
+ OutCons->size(), Index);
+ assert(result && "Could not resolve symbolic name"); (void)result;
+ Result += llvm::utostr(Index);
+ break;
+ }
+ }
+
+ Constraint++;
+ }
+
+ return Result;
+}
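+
+// Example of the simplification above (illustrative): for an output operand
+// written as "=g", the '=' prefix is dropped and 'g' expands to "imr"
+// (immediate/memory/register); a symbolic operand reference such as "[result]"
+// is resolved to the numeric index of the matching named output.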
+
+llvm::Value*
+CodeGenFunction::EmitAsmInputLValue(const AsmStmt &S,
+ const TargetInfo::ConstraintInfo &Info,
+ LValue InputValue, QualType InputType,
+ std::string &ConstraintStr) {
+ llvm::Value *Arg;
+ if (Info.allowsRegister() || !Info.allowsMemory()) {
+ if (!CodeGenFunction::hasAggregateLLVMType(InputType)) {
+ Arg = EmitLoadOfLValue(InputValue, InputType).getScalarVal();
+ } else {
+ const llvm::Type *Ty = ConvertType(InputType);
+ uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
+ if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
+ Ty = llvm::IntegerType::get(VMContext, Size);
+ Ty = llvm::PointerType::getUnqual(Ty);
+
+ Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(),
+ Ty));
+ } else {
+ Arg = InputValue.getAddress();
+ ConstraintStr += '*';
+ }
+ }
+ } else {
+ Arg = InputValue.getAddress();
+ ConstraintStr += '*';
+ }
+
+ return Arg;
+}
+
+llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S,
+ const TargetInfo::ConstraintInfo &Info,
+ const Expr *InputExpr,
+ std::string &ConstraintStr) {
+ if (Info.allowsRegister() || !Info.allowsMemory())
+ if (!CodeGenFunction::hasAggregateLLVMType(InputExpr->getType()))
+ return EmitScalarExpr(InputExpr);
+
+ InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
+ LValue Dest = EmitLValue(InputExpr);
+ return EmitAsmInputLValue(S, Info, Dest, InputExpr->getType(), ConstraintStr);
+}
+
+void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
+ // Analyze the asm string to decompose it into its pieces. We know that Sema
+ // has already done this, so it is guaranteed to be successful.
+ llvm::SmallVector<AsmStmt::AsmStringPiece, 4> Pieces;
+ unsigned DiagOffs;
+ S.AnalyzeAsmString(Pieces, getContext(), DiagOffs);
+
+ // Assemble the pieces into the final asm string.
+ std::string AsmString;
+ for (unsigned i = 0, e = Pieces.size(); i != e; ++i) {
+ if (Pieces[i].isString())
+ AsmString += Pieces[i].getString();
+ else if (Pieces[i].getModifier() == '\0')
+ AsmString += '$' + llvm::utostr(Pieces[i].getOperandNo());
+ else
+ AsmString += "${" + llvm::utostr(Pieces[i].getOperandNo()) + ':' +
+ Pieces[i].getModifier() + '}';
+ }
+
+ // Get all the output and input constraints together.
+ llvm::SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
+ llvm::SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
+
+ for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
+ TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i),
+ S.getOutputName(i));
+ bool IsValid = Target.validateOutputConstraint(Info); (void)IsValid;
+ assert(IsValid && "Failed to parse output constraint");
+ OutputConstraintInfos.push_back(Info);
+ }
+
+ for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
+ TargetInfo::ConstraintInfo Info(S.getInputConstraint(i),
+ S.getInputName(i));
+ bool IsValid = Target.validateInputConstraint(OutputConstraintInfos.data(),
+ S.getNumOutputs(), Info);
+ assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
+ InputConstraintInfos.push_back(Info);
+ }
+
+ std::string Constraints;
+
+ std::vector<LValue> ResultRegDests;
+ std::vector<QualType> ResultRegQualTys;
+ std::vector<const llvm::Type *> ResultRegTypes;
+ std::vector<const llvm::Type *> ResultTruncRegTypes;
+ std::vector<const llvm::Type*> ArgTypes;
+ std::vector<llvm::Value*> Args;
+
+ // Keep track of inout constraints.
+ std::string InOutConstraints;
+ std::vector<llvm::Value*> InOutArgs;
+ std::vector<const llvm::Type*> InOutArgTypes;
+
+ for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
+ TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
+
+ // Simplify the output constraint.
+ std::string OutputConstraint(S.getOutputConstraint(i));
+ OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, Target);
+
+ const Expr *OutExpr = S.getOutputExpr(i);
+ OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
+
+ LValue Dest = EmitLValue(OutExpr);
+ if (!Constraints.empty())
+ Constraints += ',';
+
+ // If this is a register output, then make the inline asm return it
+ // by-value. If this is a memory result, return the value by-reference.
+ if (!Info.allowsMemory() && !hasAggregateLLVMType(OutExpr->getType())) {
+ Constraints += "=" + OutputConstraint;
+ ResultRegQualTys.push_back(OutExpr->getType());
+ ResultRegDests.push_back(Dest);
+ ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
+ ResultTruncRegTypes.push_back(ResultRegTypes.back());
+
+ // If this output is tied to an input, and if the input is larger, then
+ // we need to set the actual result type of the inline asm node to be the
+ // same as the input type.
+ if (Info.hasMatchingInput()) {
+ unsigned InputNo;
+ for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
+ TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
+ if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
+ break;
+ }
+ assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
+
+ QualType InputTy = S.getInputExpr(InputNo)->getType();
+ QualType OutputType = OutExpr->getType();
+
+ uint64_t InputSize = getContext().getTypeSize(InputTy);
+ if (getContext().getTypeSize(OutputType) < InputSize) {
+ // Form the asm to return the value as a larger integer or fp type.
+ ResultRegTypes.back() = ConvertType(InputTy);
+ }
+ }
+ } else {
+ ArgTypes.push_back(Dest.getAddress()->getType());
+ Args.push_back(Dest.getAddress());
+ Constraints += "=*";
+ Constraints += OutputConstraint;
+ }
+
+ if (Info.isReadWrite()) {
+ InOutConstraints += ',';
+
+ const Expr *InputExpr = S.getOutputExpr(i);
+ llvm::Value *Arg = EmitAsmInputLValue(S, Info, Dest, InputExpr->getType(),
+ InOutConstraints);
+
+ if (Info.allowsRegister())
+ InOutConstraints += llvm::utostr(i);
+ else
+ InOutConstraints += OutputConstraint;
+
+ InOutArgTypes.push_back(Arg->getType());
+ InOutArgs.push_back(Arg);
+ }
+ }
+
+ unsigned NumConstraints = S.getNumOutputs() + S.getNumInputs();
+
+ for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
+ const Expr *InputExpr = S.getInputExpr(i);
+
+ TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
+
+ if (!Constraints.empty())
+ Constraints += ',';
+
+ // Simplify the input constraint.
+ std::string InputConstraint(S.getInputConstraint(i));
+ InputConstraint = SimplifyConstraint(InputConstraint.c_str(), Target,
+ &OutputConstraintInfos);
+
+ llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, Constraints);
+
+ // If this input argument is tied to a larger output result, extend the
+ // input to be the same size as the output. The LLVM backend wants to see
+ // the input and output of a matching constraint be the same size. Note
+ // that GCC does not define what the top bits are here. We use zext because
+ // that is usually cheaper, but LLVM IR should really get an anyext someday.
+ if (Info.hasTiedOperand()) {
+ unsigned Output = Info.getTiedOperand();
+ QualType OutputType = S.getOutputExpr(Output)->getType();
+ QualType InputTy = InputExpr->getType();
+
+ if (getContext().getTypeSize(OutputType) >
+ getContext().getTypeSize(InputTy)) {
+ // Use ptrtoint as appropriate so that we can do our extension.
+ if (isa<llvm::PointerType>(Arg->getType()))
+ Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
+ const llvm::Type *OutputTy = ConvertType(OutputType);
+ if (isa<llvm::IntegerType>(OutputTy))
+ Arg = Builder.CreateZExt(Arg, OutputTy);
+ else
+ Arg = Builder.CreateFPExt(Arg, OutputTy);
+ }
+ }
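+
+ // Illustrative case of the extension above (hypothetical operands): with
+ //   asm("..." : "=r"(long_out) : "0"(int_in))
+ // on a 64-bit target, the 32-bit input is zero-extended to 64 bits so that
+ // both sides of the tied constraint have the same LLVM type.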
+
+
+ ArgTypes.push_back(Arg->getType());
+ Args.push_back(Arg);
+ Constraints += InputConstraint;
+ }
+
+ // Append the "input" part of inout constraints last.
+ for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
+ ArgTypes.push_back(InOutArgTypes[i]);
+ Args.push_back(InOutArgs[i]);
+ }
+ Constraints += InOutConstraints;
+
+ // Clobbers
+ for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
+ llvm::StringRef Clobber = S.getClobber(i)->getString();
+
+ Clobber = Target.getNormalizedGCCRegisterName(Clobber);
+
+ if (i != 0 || NumConstraints != 0)
+ Constraints += ',';
+
+ Constraints += "~{";
+ Constraints += Clobber;
+ Constraints += '}';
+ }
+
+ // Add machine specific clobbers
+ std::string MachineClobbers = Target.getClobbers();
+ if (!MachineClobbers.empty()) {
+ if (!Constraints.empty())
+ Constraints += ',';
+ Constraints += MachineClobbers;
+ }
+
+ const llvm::Type *ResultType;
+ if (ResultRegTypes.empty())
+ ResultType = llvm::Type::getVoidTy(VMContext);
+ else if (ResultRegTypes.size() == 1)
+ ResultType = ResultRegTypes[0];
+ else
+ ResultType = llvm::StructType::get(VMContext, ResultRegTypes);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ResultType, ArgTypes, false);
+
+ llvm::InlineAsm *IA =
+ llvm::InlineAsm::get(FTy, AsmString, Constraints,
+ S.isVolatile() || S.getNumOutputs() == 0);
+ llvm::CallInst *Result = Builder.CreateCall(IA, Args.begin(), Args.end());
+ Result->addAttribute(~0, llvm::Attribute::NoUnwind);
+
+ // Slap the source location of the inline asm into a !srcloc metadata on the
+ // call.
+ unsigned LocID = S.getAsmString()->getLocStart().getRawEncoding();
+ llvm::Value *LocIDC =
+ llvm::ConstantInt::get(Int32Ty, LocID);
+ Result->setMetadata("srcloc", llvm::MDNode::get(VMContext, &LocIDC, 1));
+
+ // Extract all of the register value results from the asm.
+ std::vector<llvm::Value*> RegResults;
+ if (ResultRegTypes.size() == 1) {
+ RegResults.push_back(Result);
+ } else {
+ for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
+ llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult");
+ RegResults.push_back(Tmp);
+ }
+ }
+
+ for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
+ llvm::Value *Tmp = RegResults[i];
+
+ // If the result type of the LLVM IR asm doesn't match the result type of
+ // the expression, do the conversion.
+ if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
+ const llvm::Type *TruncTy = ResultTruncRegTypes[i];
+
+ // Truncate the integer result to the right size; note that TruncTy can be
+ // a pointer.
+ if (TruncTy->isFloatingPointTy())
+ Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
+ else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
+ uint64_t ResSize = CGM.getTargetData().getTypeSizeInBits(TruncTy);
+ Tmp = Builder.CreateTrunc(Tmp, llvm::IntegerType::get(VMContext,
+ (unsigned)ResSize));
+ Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
+ } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
+ uint64_t TmpSize =CGM.getTargetData().getTypeSizeInBits(Tmp->getType());
+ Tmp = Builder.CreatePtrToInt(Tmp, llvm::IntegerType::get(VMContext,
+ (unsigned)TmpSize));
+ Tmp = Builder.CreateTrunc(Tmp, TruncTy);
+ } else if (TruncTy->isIntegerTy()) {
+ Tmp = Builder.CreateTrunc(Tmp, TruncTy);
+ }
+ }
+
+ EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i],
+ ResultRegQualTys[i]);
+ }
+}
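+
+// End-to-end sketch (illustrative): for
+//   int out; asm volatile("..." : "=r"(out) : "r"(in) : "cc");
+// the code above builds an LLVM inline-asm call whose result type is i32,
+// whose constraint string is "=r,r,~{cc}" followed by the target's machine
+// clobbers, stores the call result back through the lvalue for 'out', and
+// attaches !srcloc metadata recording the asm string's source location.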
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp
new file mode 100644
index 0000000..dfb8dc6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp
@@ -0,0 +1,92 @@
+//===--- CGTemporaries.cpp - Emit LLVM Code for C++ temporaries -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of temporaries.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+ struct DestroyTemporary : EHScopeStack::Cleanup {
+ const CXXTemporary *Temporary;
+ llvm::Value *Addr;
+ llvm::Value *CondPtr;
+
+ DestroyTemporary(const CXXTemporary *Temporary, llvm::Value *Addr,
+ llvm::Value *CondPtr)
+ : Temporary(Temporary), Addr(Addr), CondPtr(CondPtr) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ llvm::BasicBlock *CondEnd = 0;
+
+ // If this is a conditional temporary, we need to check the condition
+ // boolean and only call the destructor if it's true.
+ if (CondPtr) {
+ llvm::BasicBlock *CondBlock =
+ CGF.createBasicBlock("temp.cond-dtor.call");
+ CondEnd = CGF.createBasicBlock("temp.cond-dtor.cont");
+
+ llvm::Value *Cond = CGF.Builder.CreateLoad(CondPtr);
+ CGF.Builder.CreateCondBr(Cond, CondBlock, CondEnd);
+ CGF.EmitBlock(CondBlock);
+ }
+
+ CGF.EmitCXXDestructorCall(Temporary->getDestructor(),
+ Dtor_Complete, /*ForVirtualBase=*/false,
+ Addr);
+
+ if (CondPtr) {
+ // Reset the condition to false.
+ CGF.Builder.CreateStore(CGF.Builder.getFalse(), CondPtr);
+ CGF.EmitBlock(CondEnd);
+ }
+ }
+ };
+}
+
+/// Emits all the code to cause the given temporary to be cleaned up.
+void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
+ llvm::Value *Ptr) {
+ llvm::AllocaInst *CondPtr = 0;
+
+ // Check if temporaries need to be conditional. If so, we'll create a
+ // condition boolean, initialize it to false here, and set it to true once
+ // the temporary has actually been created, so its destructor only runs if
+ // it was constructed.
+ if (ConditionalBranchLevel != 0) {
+ CondPtr = CreateTempAlloca(llvm::Type::getInt1Ty(VMContext), "cond");
+
+ // Initialize it to false. This initialization takes place right after
+ // the alloca insert point.
+ InitTempAlloca(CondPtr, llvm::ConstantInt::getFalse(VMContext));
+
+ // Now set it to true.
+ Builder.CreateStore(Builder.getTrue(), CondPtr);
+ }
+
+ EHStack.pushCleanup<DestroyTemporary>(NormalAndEHCleanup,
+ Temporary, Ptr, CondPtr);
+}
+
+RValue
+CodeGenFunction::EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
+ llvm::Value *AggLoc,
+ bool IsAggLocVolatile,
+ bool IsInitializer) {
+ RunCleanupsScope Scope(*this);
+ return EmitAnyExpr(E->getSubExpr(), AggLoc, IsAggLocVolatile,
+ /*IgnoreResult=*/false, IsInitializer);
+}
+
+LValue CodeGenFunction::EmitCXXExprWithTemporariesLValue(
+ const CXXExprWithTemporaries *E) {
+ RunCleanupsScope Scope(*this);
+ return EmitLValue(E->getSubExpr());
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp
new file mode 100644
index 0000000..56acfc8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp
@@ -0,0 +1,488 @@
+//===--- CGVTT.cpp - Emit LLVM Code for C++ VTTs --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of VTTs (vtable tables).
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "CGCXXABI.h"
+#include "clang/AST/RecordLayout.h"
+using namespace clang;
+using namespace CodeGen;
+
+#define D1(x)
+
+namespace {
+
+/// VTT builder - Class for building VTT layout information.
+class VTTBuilder {
+
+ CodeGenModule &CGM;
+
+ /// MostDerivedClass - The most derived class for which we're building this
+ /// VTT.
+ const CXXRecordDecl *MostDerivedClass;
+
+ typedef llvm::SmallVector<llvm::Constant *, 64> VTTComponentsVectorTy;
+
+ /// VTTComponents - The VTT components.
+ VTTComponentsVectorTy VTTComponents;
+
+ /// MostDerivedClassLayout - the AST record layout of the most derived class.
+ const ASTRecordLayout &MostDerivedClassLayout;
+
+ typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
+
+ typedef llvm::DenseMap<BaseSubobject, uint64_t> AddressPointsMapTy;
+
+ /// SubVTTIndicies - The sub-VTT indices for the bases of the most derived
+ /// class.
+ llvm::DenseMap<BaseSubobject, uint64_t> SubVTTIndicies;
+
+ /// SecondaryVirtualPointerIndices - The secondary virtual pointer indices of
+ /// all subobjects of the most derived class.
+ llvm::DenseMap<BaseSubobject, uint64_t> SecondaryVirtualPointerIndices;
+
+ /// GenerateDefinition - Whether the VTT builder should generate LLVM IR for
+ /// the VTT.
+ bool GenerateDefinition;
+
+ /// GetAddrOfVTable - Returns the address of the vtable for the base class in
+ /// the given vtable class.
+ ///
+ /// \param AddressPoints - If the returned vtable is a construction vtable,
+ /// this will hold the address points for it.
+ llvm::Constant *GetAddrOfVTable(BaseSubobject Base, bool BaseIsVirtual,
+ AddressPointsMapTy& AddressPoints);
+
+ /// AddVTablePointer - Add a vtable pointer to the VTT currently being built.
+ ///
+ /// \param AddressPoints - If the vtable is a construction vtable, this has
+ /// the address points for it.
+ void AddVTablePointer(BaseSubobject Base, llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ const AddressPointsMapTy& AddressPoints);
+
+ /// LayoutSecondaryVTTs - Lay out the secondary VTTs of the given base
+ /// subobject.
+ void LayoutSecondaryVTTs(BaseSubobject Base);
+
+ /// LayoutSecondaryVirtualPointers - Lay out the secondary virtual pointers
+ /// for the given base subobject.
+ ///
+ /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
+ /// or a direct or indirect base of a virtual base.
+ ///
+ /// \param AddressPoints - If the vtable is a construction vtable, this has
+ /// the address points for it.
+ void LayoutSecondaryVirtualPointers(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ const AddressPointsMapTy& AddressPoints,
+ VisitedVirtualBasesSetTy &VBases);
+
+ /// LayoutSecondaryVirtualPointers - Lay out the secondary virtual pointers
+ /// for the given base subobject.
+ ///
+ /// \param AddressPoints - If the vtable is a construction vtable, this has
+ /// the address points for it.
+ void LayoutSecondaryVirtualPointers(BaseSubobject Base,
+ llvm::Constant *VTable,
+ const AddressPointsMapTy& AddressPoints);
+
+ /// LayoutVirtualVTTs - Lay out the VTTs for the virtual base classes of the
+ /// given record decl.
+ void LayoutVirtualVTTs(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases);
+
+ /// LayoutVTT - Will lay out the VTT for the given subobject, including any
+ /// secondary VTTs, secondary virtual pointers and virtual VTTs.
+ void LayoutVTT(BaseSubobject Base, bool BaseIsVirtual);
+
+public:
+ VTTBuilder(CodeGenModule &CGM, const CXXRecordDecl *MostDerivedClass,
+ bool GenerateDefinition);
+
+ /// getVTTComponents - Returns a reference to the VTT components.
+ const VTTComponentsVectorTy &getVTTComponents() const {
+ return VTTComponents;
+ }
+
+ /// getSubVTTIndicies - Returns a reference to the sub-VTT indices.
+ const llvm::DenseMap<BaseSubobject, uint64_t> &getSubVTTIndicies() const {
+ return SubVTTIndicies;
+ }
+
+ /// getSecondaryVirtualPointerIndices - Returns a reference to the secondary
+ /// virtual pointer indices.
+ const llvm::DenseMap<BaseSubobject, uint64_t> &
+ getSecondaryVirtualPointerIndices() const {
+ return SecondaryVirtualPointerIndices;
+ }
+
+};
+
+VTTBuilder::VTTBuilder(CodeGenModule &CGM,
+ const CXXRecordDecl *MostDerivedClass,
+ bool GenerateDefinition)
+ : CGM(CGM), MostDerivedClass(MostDerivedClass),
+ MostDerivedClassLayout(CGM.getContext().getASTRecordLayout(MostDerivedClass)),
+ GenerateDefinition(GenerateDefinition) {
+
+ // Lay out this VTT.
+ LayoutVTT(BaseSubobject(MostDerivedClass, 0), /*BaseIsVirtual=*/false);
+}
+
+llvm::Constant *
+VTTBuilder::GetAddrOfVTable(BaseSubobject Base, bool BaseIsVirtual,
+ AddressPointsMapTy& AddressPoints) {
+ if (!GenerateDefinition)
+ return 0;
+
+ if (Base.getBase() == MostDerivedClass) {
+ assert(Base.getBaseOffset() == 0 &&
+ "Most derived class vtable must have a zero offset!");
+ // This is a regular vtable.
+ return CGM.getVTables().GetAddrOfVTable(MostDerivedClass);
+ }
+
+ return CGM.getVTables().GenerateConstructionVTable(MostDerivedClass,
+ Base, BaseIsVirtual,
+ AddressPoints);
+}
+
+void VTTBuilder::AddVTablePointer(BaseSubobject Base, llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ const AddressPointsMapTy& AddressPoints) {
+ // Store the vtable pointer index if we're generating the primary VTT.
+ if (VTableClass == MostDerivedClass) {
+ assert(!SecondaryVirtualPointerIndices.count(Base) &&
+ "A virtual pointer index already exists for this base subobject!");
+ SecondaryVirtualPointerIndices[Base] = VTTComponents.size();
+ }
+
+ if (!GenerateDefinition) {
+ VTTComponents.push_back(0);
+ return;
+ }
+
+ uint64_t AddressPoint;
+ if (VTableClass != MostDerivedClass) {
+ // The vtable is a construction vtable; look in the construction vtable
+ // address points.
+ AddressPoint = AddressPoints.lookup(Base);
+ assert(AddressPoint != 0 && "Did not find ctor vtable address point!");
+ } else {
+ // Just get the address point for the regular vtable.
+ AddressPoint = CGM.getVTables().getAddressPoint(Base, VTableClass);
+ assert(AddressPoint != 0 && "Did not find vtable address point!");
+ }
+
+ if (!AddressPoint) AddressPoint = 0;
+
+ llvm::Value *Idxs[] = {
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(CGM.getLLVMContext()), 0),
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(CGM.getLLVMContext()),
+ AddressPoint)
+ };
+
+ llvm::Constant *Init =
+ llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Idxs, 2);
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
+
+ VTTComponents.push_back(Init);
+}
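+
+// Note on the initializer built above (a sketch of the resulting constant):
+// each VTT slot holds the address of a vtable's address point rather than the
+// start of the vtable, i.e. an inbounds GEP with indices {0, AddressPoint}
+// bitcast to i8* so that every component of the VTT array has the same type.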
+
+void VTTBuilder::LayoutSecondaryVTTs(BaseSubobject Base) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+
+ // Don't lay out virtual bases.
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+ uint64_t BaseOffset = Base.getBaseOffset() +
+ Layout.getBaseClassOffset(BaseDecl);
+
+ // Layout the VTT for this base.
+ LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/false);
+ }
+}
+
+void
+VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ const AddressPointsMapTy& AddressPoints,
+ VisitedVirtualBasesSetTy &VBases) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ // We're not interested in bases that have no virtual bases and are not
+ // morally virtual themselves.
+ if (!RD->getNumVBases() && !BaseIsMorallyVirtual)
+ return;
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Itanium C++ ABI 2.6.2:
+ // Secondary virtual pointers are present for all bases with either
+ // virtual bases or virtual function declarations overridden along a
+ // virtual path.
+ //
+ // If the base class is not dynamic, we don't want to add it, nor any
+ // of its base classes.
+ if (!BaseDecl->isDynamicClass())
+ continue;
+
+ bool BaseDeclIsMorallyVirtual = BaseIsMorallyVirtual;
+ bool BaseDeclIsNonVirtualPrimaryBase = false;
+ uint64_t BaseOffset;
+ if (I->isVirtual()) {
+ // Ignore virtual bases that we've already visited.
+ if (!VBases.insert(BaseDecl))
+ continue;
+
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ BaseDeclIsMorallyVirtual = true;
+ } else {
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
+
+ if (!Layout.getPrimaryBaseWasVirtual() &&
+ Layout.getPrimaryBase() == BaseDecl)
+ BaseDeclIsNonVirtualPrimaryBase = true;
+ }
+
+ // Itanium C++ ABI 2.6.2:
+ // Secondary virtual pointers: for each base class X which (a) has virtual
+ // bases or is reachable along a virtual path from D, and (b) is not a
+ // non-virtual primary base, the address of the virtual table for X-in-D
+ // or an appropriate construction virtual table.
+ if (!BaseDeclIsNonVirtualPrimaryBase &&
+ (BaseDecl->getNumVBases() || BaseDeclIsMorallyVirtual)) {
+ // Add the vtable pointer.
+ AddVTablePointer(BaseSubobject(BaseDecl, BaseOffset), VTable, VTableClass,
+ AddressPoints);
+ }
+
+ // And lay out the secondary virtual pointers for the base class.
+ LayoutSecondaryVirtualPointers(BaseSubobject(BaseDecl, BaseOffset),
+ BaseDeclIsMorallyVirtual, VTable,
+ VTableClass, AddressPoints, VBases);
+ }
+}
+
+void
+VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
+ llvm::Constant *VTable,
+ const AddressPointsMapTy& AddressPoints) {
+ VisitedVirtualBasesSetTy VBases;
+ LayoutSecondaryVirtualPointers(Base, /*BaseIsMorallyVirtual=*/false,
+ VTable, Base.getBase(), AddressPoints, VBases);
+}
+
+void VTTBuilder::LayoutVirtualVTTs(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases) {
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Check if this is a virtual base.
+ if (I->isVirtual()) {
+ // Check if we've seen this base before.
+ if (!VBases.insert(BaseDecl))
+ continue;
+
+ uint64_t BaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+
+ LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/true);
+ }
+
+ // We only need to lay out virtual VTTs for this base if it actually has
+ // virtual bases.
+ if (BaseDecl->getNumVBases())
+ LayoutVirtualVTTs(BaseDecl, VBases);
+ }
+}
+
+void VTTBuilder::LayoutVTT(BaseSubobject Base, bool BaseIsVirtual) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ // Itanium C++ ABI 2.6.2:
+ // An array of virtual table addresses, called the VTT, is declared for
+ // each class type that has indirect or direct virtual base classes.
+ if (RD->getNumVBases() == 0)
+ return;
+
+ bool IsPrimaryVTT = Base.getBase() == MostDerivedClass;
+
+ if (!IsPrimaryVTT) {
+ // Remember the sub-VTT index.
+ SubVTTIndicies[Base] = VTTComponents.size();
+ }
+
+ AddressPointsMapTy AddressPoints;
+ llvm::Constant *VTable = GetAddrOfVTable(Base, BaseIsVirtual, AddressPoints);
+
+ // Add the primary vtable pointer.
+ AddVTablePointer(Base, VTable, RD, AddressPoints);
+
+ // Add the secondary VTTs.
+ LayoutSecondaryVTTs(Base);
+
+ // Add the secondary virtual pointers.
+ LayoutSecondaryVirtualPointers(Base, VTable, AddressPoints);
+
+ // If this is the primary VTT, we want to lay out virtual VTTs as well.
+ if (IsPrimaryVTT) {
+ VisitedVirtualBasesSetTy VBases;
+ LayoutVirtualVTTs(Base.getBase(), VBases);
+ }
+}
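+
+// Resulting layout for a primary VTT (illustrative, for some class D), in the
+// order the steps above append components:
+//   1. D's own virtual pointer,
+//   2. sub-VTTs for D's direct non-virtual bases that need them,
+//   3. secondary virtual pointers for bases with virtual bases or bases
+//      reachable along a virtual path,
+//   4. VTTs for D's virtual bases,
+// which matches the order described in Itanium C++ ABI 2.6.2.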
+
+}
+
+llvm::GlobalVariable *
+CodeGenVTables::GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
+ bool GenerateDefinition,
+ const CXXRecordDecl *RD) {
+ // Only classes that have virtual bases need a VTT.
+ if (RD->getNumVBases() == 0)
+ return 0;
+
+ llvm::SmallString<256> OutName;
+ CGM.getCXXABI().getMangleContext().mangleCXXVTT(RD, OutName);
+ llvm::StringRef Name = OutName.str();
+
+ D1(printf("vtt %s\n", RD->getNameAsCString()));
+
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true);
+ if (GV == 0 || GV->isDeclaration()) {
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+
+ VTTBuilder Builder(CGM, RD, GenerateDefinition);
+
+ const llvm::ArrayType *Type =
+ llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
+
+ llvm::Constant *Init = 0;
+ if (GenerateDefinition)
+ Init = llvm::ConstantArray::get(Type, Builder.getVTTComponents().data(),
+ Builder.getVTTComponents().size());
+
+ llvm::GlobalVariable *OldGV = GV;
+ GV = new llvm::GlobalVariable(CGM.getModule(), Type, /*isConstant=*/true,
+ Linkage, Init, Name);
+ CGM.setGlobalVisibility(GV, RD);
+
+ if (OldGV) {
+ GV->takeName(OldGV);
+ llvm::Constant *NewPtr =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtr);
+ OldGV->eraseFromParent();
+ }
+ }
+
+ return GV;
+}
+
+llvm::GlobalVariable *CodeGenVTables::getVTT(const CXXRecordDecl *RD) {
+ return GenerateVTT(llvm::GlobalValue::ExternalLinkage,
+ /*GenerateDefinition=*/false, RD);
+}
+
+bool CodeGenVTables::needsVTTParameter(GlobalDecl GD) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+ // If the class doesn't have any virtual bases, just return early.
+ if (!MD->getParent()->getNumVBases())
+ return false;
+
+ // Check if we have a base constructor.
+ if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
+ return true;
+
+ // Check if we have a base destructor.
+ if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
+ return true;
+
+ return false;
+}
+
+uint64_t CodeGenVTables::getSubVTTIndex(const CXXRecordDecl *RD,
+ BaseSubobject Base) {
+ BaseSubobjectPairTy ClassSubobjectPair(RD, Base);
+
+ SubVTTIndiciesMapTy::iterator I = SubVTTIndicies.find(ClassSubobjectPair);
+ if (I != SubVTTIndicies.end())
+ return I->second;
+
+ VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/false);
+
+ for (llvm::DenseMap<BaseSubobject, uint64_t>::const_iterator I =
+ Builder.getSubVTTIndicies().begin(),
+ E = Builder.getSubVTTIndicies().end(); I != E; ++I) {
+ // Insert all indices.
+ BaseSubobjectPairTy ClassSubobjectPair(RD, I->first);
+
+ SubVTTIndicies.insert(std::make_pair(ClassSubobjectPair, I->second));
+ }
+
+ I = SubVTTIndicies.find(ClassSubobjectPair);
+ assert(I != SubVTTIndicies.end() && "Did not find index!");
+
+ return I->second;
+}
+
+uint64_t
+CodeGenVTables::getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD,
+ BaseSubobject Base) {
+ SecondaryVirtualPointerIndicesMapTy::iterator I =
+ SecondaryVirtualPointerIndices.find(std::make_pair(RD, Base));
+
+ if (I != SecondaryVirtualPointerIndices.end())
+ return I->second;
+
+ VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/false);
+
+ // Insert all secondary vpointer indices.
+ for (llvm::DenseMap<BaseSubobject, uint64_t>::const_iterator I =
+ Builder.getSecondaryVirtualPointerIndices().begin(),
+ E = Builder.getSecondaryVirtualPointerIndices().end(); I != E; ++I) {
+ std::pair<const CXXRecordDecl *, BaseSubobject> Pair =
+ std::make_pair(RD, I->first);
+
+ SecondaryVirtualPointerIndices.insert(std::make_pair(Pair, I->second));
+ }
+
+ I = SecondaryVirtualPointerIndices.find(std::make_pair(RD, Base));
+ assert(I != SecondaryVirtualPointerIndices.end() && "Did not find index!");
+
+ return I->second;
+}
+
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
new file mode 100644
index 0000000..bed4670
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
@@ -0,0 +1,3050 @@
+//===--- CGVTables.cpp - Emit LLVM Code for C++ vtables -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Format.h"
+#include <algorithm>
+#include <cstdio>
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+
+/// BaseOffset - Represents an offset from a derived class to a direct or
+/// indirect base class.
+struct BaseOffset {
+ /// DerivedClass - The derived class.
+ const CXXRecordDecl *DerivedClass;
+
+ /// VirtualBase - If the path from the derived class to the base class
+ /// involves a virtual base class, this holds its declaration.
+ const CXXRecordDecl *VirtualBase;
+
+ /// NonVirtualOffset - The offset from the derived class to the base class.
+ /// (Or the offset from the virtual base class to the base class, if the
+ /// path from the derived class to the base class involves a virtual base
+ /// class.)
+ int64_t NonVirtualOffset;
+
+ BaseOffset() : DerivedClass(0), VirtualBase(0), NonVirtualOffset(0) { }
+ BaseOffset(const CXXRecordDecl *DerivedClass,
+ const CXXRecordDecl *VirtualBase, int64_t NonVirtualOffset)
+ : DerivedClass(DerivedClass), VirtualBase(VirtualBase),
+ NonVirtualOffset(NonVirtualOffset) { }
+
+ bool isEmpty() const { return !NonVirtualOffset && !VirtualBase; }
+};
+
+/// FinalOverriders - Contains the final overrider member functions for all
+/// member functions in the base subobjects of a class.
+class FinalOverriders {
+public:
+ /// OverriderInfo - Information about a final overrider.
+ struct OverriderInfo {
+ /// Method - The method decl of the overrider.
+ const CXXMethodDecl *Method;
+
+ /// Offset - the base offset of the overrider in the layout class.
+ uint64_t Offset;
+
+ OverriderInfo() : Method(0), Offset(0) { }
+ };
+
+private:
+ /// MostDerivedClass - The most derived class for which the final overriders
+ /// are stored.
+ const CXXRecordDecl *MostDerivedClass;
+
+ /// MostDerivedClassOffset - If we're building final overriders for a
+ /// construction vtable, this holds the offset from the layout class to the
+ /// most derived class.
+ const uint64_t MostDerivedClassOffset;
+
+ /// LayoutClass - The class we're using for layout information. Will be
+ /// different than the most derived class if the final overriders are for a
+ /// construction vtable.
+ const CXXRecordDecl *LayoutClass;
+
+ ASTContext &Context;
+
+ /// MostDerivedClassLayout - the AST record layout of the most derived class.
+ const ASTRecordLayout &MostDerivedClassLayout;
+
+ /// MethodBaseOffsetPairTy - Uniquely identifies a member function
+ /// in a base subobject.
+ typedef std::pair<const CXXMethodDecl *, uint64_t> MethodBaseOffsetPairTy;
+
+ typedef llvm::DenseMap<MethodBaseOffsetPairTy,
+ OverriderInfo> OverridersMapTy;
+
+ /// OverridersMap - The final overriders for all virtual member functions of
+ /// all the base subobjects of the most derived class.
+ OverridersMapTy OverridersMap;
+
+ /// SubobjectsToOffsetsMapTy - A mapping from a base subobject (represented
+ /// as a record decl and a subobject number) and its offsets in the most
+ /// derived class as well as the layout class.
+ typedef llvm::DenseMap<std::pair<const CXXRecordDecl *, unsigned>,
+ uint64_t> SubobjectOffsetMapTy;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, unsigned> SubobjectCountMapTy;
+
+ /// ComputeBaseOffsets - Compute the offsets for all base subobjects of the
+ /// given base.
+ void ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
+ uint64_t OffsetInLayoutClass,
+ SubobjectOffsetMapTy &SubobjectOffsets,
+ SubobjectOffsetMapTy &SubobjectLayoutClassOffsets,
+ SubobjectCountMapTy &SubobjectCounts);
+
+ typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
+
+ /// dump - dump the final overriders for a base subobject, and all its direct
+ /// and indirect base subobjects.
+ void dump(llvm::raw_ostream &Out, BaseSubobject Base,
+ VisitedVirtualBasesSetTy& VisitedVirtualBases);
+
+public:
+ FinalOverriders(const CXXRecordDecl *MostDerivedClass,
+ uint64_t MostDerivedClassOffset,
+ const CXXRecordDecl *LayoutClass);
+
+ /// getOverrider - Get the final overrider for the given method declaration in
+ /// the subobject with the given base offset.
+ OverriderInfo getOverrider(const CXXMethodDecl *MD,
+ uint64_t BaseOffset) const {
+ assert(OverridersMap.count(std::make_pair(MD, BaseOffset)) &&
+ "Did not find overrider!");
+
+ return OverridersMap.lookup(std::make_pair(MD, BaseOffset));
+ }
+
+ /// dump - dump the final overriders.
+ void dump() {
+ VisitedVirtualBasesSetTy VisitedVirtualBases;
+ dump(llvm::errs(), BaseSubobject(MostDerivedClass, 0), VisitedVirtualBases);
+ }
+
+};
+
+#define DUMP_OVERRIDERS 0
+
+FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass,
+ uint64_t MostDerivedClassOffset,
+ const CXXRecordDecl *LayoutClass)
+ : MostDerivedClass(MostDerivedClass),
+ MostDerivedClassOffset(MostDerivedClassOffset), LayoutClass(LayoutClass),
+ Context(MostDerivedClass->getASTContext()),
+ MostDerivedClassLayout(Context.getASTRecordLayout(MostDerivedClass)) {
+
+ // Compute base offsets.
+ SubobjectOffsetMapTy SubobjectOffsets;
+ SubobjectOffsetMapTy SubobjectLayoutClassOffsets;
+ SubobjectCountMapTy SubobjectCounts;
+ ComputeBaseOffsets(BaseSubobject(MostDerivedClass, 0), /*IsVirtual=*/false,
+ MostDerivedClassOffset, SubobjectOffsets,
+ SubobjectLayoutClassOffsets, SubobjectCounts);
+
+ // Get the final overriders.
+ CXXFinalOverriderMap FinalOverriders;
+ MostDerivedClass->getFinalOverriders(FinalOverriders);
+
+ for (CXXFinalOverriderMap::const_iterator I = FinalOverriders.begin(),
+ E = FinalOverriders.end(); I != E; ++I) {
+ const CXXMethodDecl *MD = I->first;
+ const OverridingMethods& Methods = I->second;
+
+ for (OverridingMethods::const_iterator I = Methods.begin(),
+ E = Methods.end(); I != E; ++I) {
+ unsigned SubobjectNumber = I->first;
+ assert(SubobjectOffsets.count(std::make_pair(MD->getParent(),
+ SubobjectNumber)) &&
+ "Did not find subobject offset!");
+
+ uint64_t BaseOffset = SubobjectOffsets[std::make_pair(MD->getParent(),
+ SubobjectNumber)];
+
+ assert(I->second.size() == 1 && "Final overrider is not unique!");
+ const UniqueVirtualMethod &Method = I->second.front();
+
+ const CXXRecordDecl *OverriderRD = Method.Method->getParent();
+ assert(SubobjectLayoutClassOffsets.count(
+ std::make_pair(OverriderRD, Method.Subobject))
+ && "Did not find subobject offset!");
+ uint64_t OverriderOffset =
+ SubobjectLayoutClassOffsets[std::make_pair(OverriderRD,
+ Method.Subobject)];
+
+ OverriderInfo& Overrider = OverridersMap[std::make_pair(MD, BaseOffset)];
+ assert(!Overrider.Method && "Overrider should not exist yet!");
+
+ Overrider.Offset = OverriderOffset;
+ Overrider.Method = Method.Method;
+ }
+ }
+
+#if DUMP_OVERRIDERS
+ // And dump them (for now).
+ dump();
+#endif
+}
+
+static BaseOffset ComputeBaseOffset(ASTContext &Context,
+ const CXXRecordDecl *DerivedRD,
+ const CXXBasePath &Path) {
+ int64_t NonVirtualOffset = 0;
+
+ unsigned NonVirtualStart = 0;
+ const CXXRecordDecl *VirtualBase = 0;
+
+ // First, look for the virtual base class.
+ for (unsigned I = 0, E = Path.size(); I != E; ++I) {
+ const CXXBasePathElement &Element = Path[I];
+
+ if (Element.Base->isVirtual()) {
+ // FIXME: Can we break when we find the first virtual base?
+ // (If we can't, can't we just iterate over the path in reverse order?)
+ NonVirtualStart = I + 1;
+ QualType VBaseType = Element.Base->getType();
+ VirtualBase =
+ cast<CXXRecordDecl>(VBaseType->getAs<RecordType>()->getDecl());
+ }
+ }
+
+ // Now compute the non-virtual offset.
+ for (unsigned I = NonVirtualStart, E = Path.size(); I != E; ++I) {
+ const CXXBasePathElement &Element = Path[I];
+
+ // Check the base class offset.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Element.Class);
+
+ const RecordType *BaseType = Element.Base->getType()->getAs<RecordType>();
+ const CXXRecordDecl *Base = cast<CXXRecordDecl>(BaseType->getDecl());
+
+ NonVirtualOffset += Layout.getBaseClassOffset(Base);
+ }
+
+ // FIXME: This should probably use CharUnits or something. Maybe we should
+ // even change the base offsets in ASTRecordLayout to be specified in
+ // CharUnits.
+ return BaseOffset(DerivedRD, VirtualBase, NonVirtualOffset / 8);
+
+}
+
+static BaseOffset ComputeBaseOffset(ASTContext &Context,
+ const CXXRecordDecl *BaseRD,
+ const CXXRecordDecl *DerivedRD) {
+ CXXBasePaths Paths(/*FindAmbiguities=*/false,
+ /*RecordPaths=*/true, /*DetectVirtual=*/false);
+
+ if (!const_cast<CXXRecordDecl *>(DerivedRD)->
+ isDerivedFrom(const_cast<CXXRecordDecl *>(BaseRD), Paths)) {
+ assert(false && "Class must be derived from the passed in base class!");
+ return BaseOffset();
+ }
+
+ return ComputeBaseOffset(Context, DerivedRD, Paths.front());
+}
+
+static BaseOffset
+ComputeReturnAdjustmentBaseOffset(ASTContext &Context,
+ const CXXMethodDecl *DerivedMD,
+ const CXXMethodDecl *BaseMD) {
+ const FunctionType *BaseFT = BaseMD->getType()->getAs<FunctionType>();
+ const FunctionType *DerivedFT = DerivedMD->getType()->getAs<FunctionType>();
+
+ // Canonicalize the return types.
+ CanQualType CanDerivedReturnType =
+ Context.getCanonicalType(DerivedFT->getResultType());
+ CanQualType CanBaseReturnType =
+ Context.getCanonicalType(BaseFT->getResultType());
+
+ assert(CanDerivedReturnType->getTypeClass() ==
+ CanBaseReturnType->getTypeClass() &&
+ "Types must have same type class!");
+
+ if (CanDerivedReturnType == CanBaseReturnType) {
+ // No adjustment needed.
+ return BaseOffset();
+ }
+
+ if (isa<ReferenceType>(CanDerivedReturnType)) {
+ CanDerivedReturnType =
+ CanDerivedReturnType->getAs<ReferenceType>()->getPointeeType();
+ CanBaseReturnType =
+ CanBaseReturnType->getAs<ReferenceType>()->getPointeeType();
+ } else if (isa<PointerType>(CanDerivedReturnType)) {
+ CanDerivedReturnType =
+ CanDerivedReturnType->getAs<PointerType>()->getPointeeType();
+ CanBaseReturnType =
+ CanBaseReturnType->getAs<PointerType>()->getPointeeType();
+ } else {
+ assert(false && "Unexpected return type!");
+ }
+
+ // We need to compare unqualified types here; consider
+ // const T *Base::foo();
+ // T *Derived::foo();
+ if (CanDerivedReturnType.getUnqualifiedType() ==
+ CanBaseReturnType.getUnqualifiedType()) {
+ // No adjustment needed.
+ return BaseOffset();
+ }
+
+ const CXXRecordDecl *DerivedRD =
+ cast<CXXRecordDecl>(cast<RecordType>(CanDerivedReturnType)->getDecl());
+
+ const CXXRecordDecl *BaseRD =
+ cast<CXXRecordDecl>(cast<RecordType>(CanBaseReturnType)->getDecl());
+
+ return ComputeBaseOffset(Context, BaseRD, DerivedRD);
+}
+
+void
+FinalOverriders::ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
+ uint64_t OffsetInLayoutClass,
+ SubobjectOffsetMapTy &SubobjectOffsets,
+ SubobjectOffsetMapTy &SubobjectLayoutClassOffsets,
+ SubobjectCountMapTy &SubobjectCounts) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ unsigned SubobjectNumber = 0;
+ if (!IsVirtual)
+ SubobjectNumber = ++SubobjectCounts[RD];
+
+ // Set up the subobject to offset mapping.
+ assert(!SubobjectOffsets.count(std::make_pair(RD, SubobjectNumber))
+ && "Subobject offset already exists!");
+ assert(!SubobjectLayoutClassOffsets.count(std::make_pair(RD, SubobjectNumber))
+ && "Subobject offset already exists!");
+
+ SubobjectOffsets[std::make_pair(RD, SubobjectNumber)] =
+ Base.getBaseOffset();
+ SubobjectLayoutClassOffsets[std::make_pair(RD, SubobjectNumber)] =
+ OffsetInLayoutClass;
+
+ // Traverse our bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ uint64_t BaseOffset;
+ uint64_t BaseOffsetInLayoutClass;
+ if (I->isVirtual()) {
+ // Check if we've visited this virtual base before.
+ if (SubobjectOffsets.count(std::make_pair(BaseDecl, 0)))
+ continue;
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ BaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+ } else {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ uint64_t Offset = Layout.getBaseClassOffset(BaseDecl);
+
+ BaseOffset = Base.getBaseOffset() + Offset;
+ BaseOffsetInLayoutClass = OffsetInLayoutClass + Offset;
+ }
+
+ ComputeBaseOffsets(BaseSubobject(BaseDecl, BaseOffset), I->isVirtual(),
+ BaseOffsetInLayoutClass, SubobjectOffsets,
+ SubobjectLayoutClassOffsets, SubobjectCounts);
+ }
+}
+
+void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base,
+ VisitedVirtualBasesSetTy &VisitedVirtualBases) {
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore bases that don't have any virtual member functions.
+ if (!BaseDecl->isPolymorphic())
+ continue;
+
+ uint64_t BaseOffset;
+ if (I->isVirtual()) {
+ if (!VisitedVirtualBases.insert(BaseDecl)) {
+ // We've visited this base before.
+ continue;
+ }
+
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ } else {
+ BaseOffset = Layout.getBaseClassOffset(BaseDecl) +
+ Base.getBaseOffset();
+ }
+
+ dump(Out, BaseSubobject(BaseDecl, BaseOffset), VisitedVirtualBases);
+ }
+
+ Out << "Final overriders for (" << RD->getQualifiedNameAsString() << ", ";
+ Out << Base.getBaseOffset() / 8 << ")\n";
+
+ // Now dump the overriders for this base subobject.
+ for (CXXRecordDecl::method_iterator I = RD->method_begin(),
+ E = RD->method_end(); I != E; ++I) {
+ const CXXMethodDecl *MD = *I;
+
+ if (!MD->isVirtual())
+ continue;
+
+ OverriderInfo Overrider = getOverrider(MD, Base.getBaseOffset());
+
+ Out << " " << MD->getQualifiedNameAsString() << " - (";
+ Out << Overrider.Method->getQualifiedNameAsString();
+ Out << ", " << Overrider.Offset / 8 << ')';
+
+ BaseOffset Offset;
+ if (!Overrider.Method->isPure())
+ Offset = ComputeReturnAdjustmentBaseOffset(Context, Overrider.Method, MD);
+
+ if (!Offset.isEmpty()) {
+ Out << " [ret-adj: ";
+ if (Offset.VirtualBase)
+ Out << Offset.VirtualBase->getQualifiedNameAsString() << " vbase, ";
+
+ Out << Offset.NonVirtualOffset << " nv]";
+ }
+
+ Out << "\n";
+ }
+}
+
+/// VTableComponent - Represents a single component in a vtable.
+class VTableComponent {
+public:
+ enum Kind {
+ CK_VCallOffset,
+ CK_VBaseOffset,
+ CK_OffsetToTop,
+ CK_RTTI,
+ CK_FunctionPointer,
+
+ /// CK_CompleteDtorPointer - A pointer to the complete destructor.
+ CK_CompleteDtorPointer,
+
+ /// CK_DeletingDtorPointer - A pointer to the deleting destructor.
+ CK_DeletingDtorPointer,
+
+ /// CK_UnusedFunctionPointer - In some cases, a vtable function pointer
+ /// will end up never being called. Such vtable function pointers are
+ /// represented as a CK_UnusedFunctionPointer.
+ CK_UnusedFunctionPointer
+ };
+
+ static VTableComponent MakeVCallOffset(int64_t Offset) {
+ return VTableComponent(CK_VCallOffset, Offset);
+ }
+
+ static VTableComponent MakeVBaseOffset(int64_t Offset) {
+ return VTableComponent(CK_VBaseOffset, Offset);
+ }
+
+ static VTableComponent MakeOffsetToTop(int64_t Offset) {
+ return VTableComponent(CK_OffsetToTop, Offset);
+ }
+
+ static VTableComponent MakeRTTI(const CXXRecordDecl *RD) {
+ return VTableComponent(CK_RTTI, reinterpret_cast<uintptr_t>(RD));
+ }
+
+ static VTableComponent MakeFunction(const CXXMethodDecl *MD) {
+ assert(!isa<CXXDestructorDecl>(MD) &&
+ "Don't use MakeFunction with destructors!");
+
+ return VTableComponent(CK_FunctionPointer,
+ reinterpret_cast<uintptr_t>(MD));
+ }
+
+ static VTableComponent MakeCompleteDtor(const CXXDestructorDecl *DD) {
+ return VTableComponent(CK_CompleteDtorPointer,
+ reinterpret_cast<uintptr_t>(DD));
+ }
+
+ static VTableComponent MakeDeletingDtor(const CXXDestructorDecl *DD) {
+ return VTableComponent(CK_DeletingDtorPointer,
+ reinterpret_cast<uintptr_t>(DD));
+ }
+
+ static VTableComponent MakeUnusedFunction(const CXXMethodDecl *MD) {
+ assert(!isa<CXXDestructorDecl>(MD) &&
+ "Don't use MakeUnusedFunction with destructors!");
+ return VTableComponent(CK_UnusedFunctionPointer,
+ reinterpret_cast<uintptr_t>(MD));
+ }
+
+ static VTableComponent getFromOpaqueInteger(uint64_t I) {
+ return VTableComponent(I);
+ }
+
+ /// getKind - Get the kind of this vtable component.
+ Kind getKind() const {
+ return (Kind)(Value & 0x7);
+ }
+
+ int64_t getVCallOffset() const {
+ assert(getKind() == CK_VCallOffset && "Invalid component kind!");
+
+ return getOffset();
+ }
+
+ int64_t getVBaseOffset() const {
+ assert(getKind() == CK_VBaseOffset && "Invalid component kind!");
+
+ return getOffset();
+ }
+
+ int64_t getOffsetToTop() const {
+ assert(getKind() == CK_OffsetToTop && "Invalid component kind!");
+
+ return getOffset();
+ }
+
+ const CXXRecordDecl *getRTTIDecl() const {
+ assert(getKind() == CK_RTTI && "Invalid component kind!");
+
+ return reinterpret_cast<CXXRecordDecl *>(getPointer());
+ }
+
+ const CXXMethodDecl *getFunctionDecl() const {
+ assert(getKind() == CK_FunctionPointer);
+
+ return reinterpret_cast<CXXMethodDecl *>(getPointer());
+ }
+
+ const CXXDestructorDecl *getDestructorDecl() const {
+ assert((getKind() == CK_CompleteDtorPointer ||
+ getKind() == CK_DeletingDtorPointer) && "Invalid component kind!");
+
+ return reinterpret_cast<CXXDestructorDecl *>(getPointer());
+ }
+
+ const CXXMethodDecl *getUnusedFunctionDecl() const {
+ assert(getKind() == CK_UnusedFunctionPointer);
+
+ return reinterpret_cast<CXXMethodDecl *>(getPointer());
+ }
+
+private:
+ VTableComponent(Kind ComponentKind, int64_t Offset) {
+ assert((ComponentKind == CK_VCallOffset ||
+ ComponentKind == CK_VBaseOffset ||
+ ComponentKind == CK_OffsetToTop) && "Invalid component kind!");
+ assert(Offset <= ((1LL << 56) - 1) && "Offset is too big!");
+
+ Value = ((Offset << 3) | ComponentKind);
+ }
+
+ VTableComponent(Kind ComponentKind, uintptr_t Ptr) {
+ assert((ComponentKind == CK_RTTI ||
+ ComponentKind == CK_FunctionPointer ||
+ ComponentKind == CK_CompleteDtorPointer ||
+ ComponentKind == CK_DeletingDtorPointer ||
+ ComponentKind == CK_UnusedFunctionPointer) &&
+ "Invalid component kind!");
+
+ assert((Ptr & 7) == 0 && "Pointer not sufficiently aligned!");
+
+ Value = Ptr | ComponentKind;
+ }
+
+ int64_t getOffset() const {
+ assert((getKind() == CK_VCallOffset || getKind() == CK_VBaseOffset ||
+ getKind() == CK_OffsetToTop) && "Invalid component kind!");
+
+ return Value >> 3;
+ }
+
+ uintptr_t getPointer() const {
+ assert((getKind() == CK_RTTI ||
+ getKind() == CK_FunctionPointer ||
+ getKind() == CK_CompleteDtorPointer ||
+ getKind() == CK_DeletingDtorPointer ||
+ getKind() == CK_UnusedFunctionPointer) &&
+ "Invalid component kind!");
+
+ return static_cast<uintptr_t>(Value & ~7ULL);
+ }
+
+ explicit VTableComponent(uint64_t Value)
+ : Value(Value) { }
+
+ /// The kind is stored in the lower 3 bits of the value. For offsets, we
+  /// make use of the fact that classes can't be larger than 2^55 bytes,
+  /// so we store the offset in the lower part of the 61 bits that remain.
+ /// (The reason that we're not simply using a PointerIntPair here is that we
+ /// need the offsets to be 64-bit, even when on a 32-bit machine).
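+  ///
+  /// As an illustrative example (not tied to any particular class layout): a
+  /// CK_VBaseOffset component holding the offset -16 is stored as
+  /// ((-16 << 3) | CK_VBaseOffset) and recovered by getOffset() as Value >> 3.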
+ int64_t Value;
+};
+
+/// VCallOffsetMap - Keeps track of vcall offsets when building a vtable.
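+/// For example (illustrative): two virtual member functions with the same
+/// name and signature, say A::f and B::f both declared as 'virtual void f()',
+/// can share a single vcall offset slot, and any two destructors can always
+/// share one; MethodsCanShareVCallOffset below encodes exactly this rule.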
+struct VCallOffsetMap {
+
+ typedef std::pair<const CXXMethodDecl *, int64_t> MethodAndOffsetPairTy;
+
+ /// Offsets - Keeps track of methods and their offsets.
+ // FIXME: This should be a real map and not a vector.
+ llvm::SmallVector<MethodAndOffsetPairTy, 16> Offsets;
+
+ /// MethodsCanShareVCallOffset - Returns whether two virtual member functions
+ /// can share the same vcall offset.
+ static bool MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
+ const CXXMethodDecl *RHS);
+
+public:
+ /// AddVCallOffset - Adds a vcall offset to the map. Returns true if the
+ /// add was successful, or false if there was already a member function with
+ /// the same signature in the map.
+ bool AddVCallOffset(const CXXMethodDecl *MD, int64_t OffsetOffset);
+
+ /// getVCallOffsetOffset - Returns the vcall offset offset (relative to the
+ /// vtable address point) for the given virtual member function.
+ int64_t getVCallOffsetOffset(const CXXMethodDecl *MD);
+
+ // empty - Return whether the offset map is empty or not.
+ bool empty() const { return Offsets.empty(); }
+};
+
+static bool HasSameVirtualSignature(const CXXMethodDecl *LHS,
+ const CXXMethodDecl *RHS) {
+ ASTContext &C = LHS->getASTContext(); // TODO: thread this down
+ CanQual<FunctionProtoType>
+ LT = C.getCanonicalType(LHS->getType()).getAs<FunctionProtoType>(),
+ RT = C.getCanonicalType(RHS->getType()).getAs<FunctionProtoType>();
+
+ // Fast-path matches in the canonical types.
+ if (LT == RT) return true;
+
+ // Force the signatures to match. We can't rely on the overrides
+ // list here because there isn't necessarily an inheritance
+ // relationship between the two methods.
+ if (LT.getQualifiers() != RT.getQualifiers() ||
+ LT->getNumArgs() != RT->getNumArgs())
+ return false;
+ for (unsigned I = 0, E = LT->getNumArgs(); I != E; ++I)
+ if (LT->getArgType(I) != RT->getArgType(I))
+ return false;
+ return true;
+}
+
+bool VCallOffsetMap::MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
+ const CXXMethodDecl *RHS) {
+ assert(LHS->isVirtual() && "LHS must be virtual!");
+  assert(RHS->isVirtual() && "RHS must be virtual!");
+
+ // A destructor can share a vcall offset with another destructor.
+ if (isa<CXXDestructorDecl>(LHS))
+ return isa<CXXDestructorDecl>(RHS);
+
+ // FIXME: We need to check more things here.
+
+ // The methods must have the same name.
+ DeclarationName LHSName = LHS->getDeclName();
+ DeclarationName RHSName = RHS->getDeclName();
+ if (LHSName != RHSName)
+ return false;
+
+ // And the same signatures.
+ return HasSameVirtualSignature(LHS, RHS);
+}
+
+bool VCallOffsetMap::AddVCallOffset(const CXXMethodDecl *MD,
+ int64_t OffsetOffset) {
+ // Check if we can reuse an offset.
+ for (unsigned I = 0, E = Offsets.size(); I != E; ++I) {
+ if (MethodsCanShareVCallOffset(Offsets[I].first, MD))
+ return false;
+ }
+
+ // Add the offset.
+ Offsets.push_back(MethodAndOffsetPairTy(MD, OffsetOffset));
+ return true;
+}
+
+int64_t VCallOffsetMap::getVCallOffsetOffset(const CXXMethodDecl *MD) {
+ // Look for an offset.
+ for (unsigned I = 0, E = Offsets.size(); I != E; ++I) {
+ if (MethodsCanShareVCallOffset(Offsets[I].first, MD))
+ return Offsets[I].second;
+ }
+
+ assert(false && "Should always find a vcall offset offset!");
+ return 0;
+}
+
+/// VCallAndVBaseOffsetBuilder - Class for building vcall and vbase offsets.
+class VCallAndVBaseOffsetBuilder {
+public:
+ typedef llvm::DenseMap<const CXXRecordDecl *, int64_t>
+ VBaseOffsetOffsetsMapTy;
+
+private:
+ /// MostDerivedClass - The most derived class for which we're building vcall
+ /// and vbase offsets.
+ const CXXRecordDecl *MostDerivedClass;
+
+ /// LayoutClass - The class we're using for layout information. Will be
+ /// different than the most derived class if we're building a construction
+ /// vtable.
+ const CXXRecordDecl *LayoutClass;
+
+ /// Context - The ASTContext which we will use for layout information.
+ ASTContext &Context;
+
+ /// Components - vcall and vbase offset components
+ typedef llvm::SmallVector<VTableComponent, 64> VTableComponentVectorTy;
+ VTableComponentVectorTy Components;
+
+ /// VisitedVirtualBases - Visited virtual bases.
+ llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;
+
+ /// VCallOffsets - Keeps track of vcall offsets.
+ VCallOffsetMap VCallOffsets;
+
+
+ /// VBaseOffsetOffsets - Contains the offsets of the virtual base offsets,
+ /// relative to the address point.
+ VBaseOffsetOffsetsMapTy VBaseOffsetOffsets;
+
+ /// FinalOverriders - The final overriders of the most derived class.
+ /// (Can be null when we're not building a vtable of the most derived class).
+ const FinalOverriders *Overriders;
+
+ /// AddVCallAndVBaseOffsets - Add vcall offsets and vbase offsets for the
+ /// given base subobject.
+ void AddVCallAndVBaseOffsets(BaseSubobject Base, bool BaseIsVirtual,
+ uint64_t RealBaseOffset);
+
+ /// AddVCallOffsets - Add vcall offsets for the given base subobject.
+ void AddVCallOffsets(BaseSubobject Base, uint64_t VBaseOffset);
+
+ /// AddVBaseOffsets - Add vbase offsets for the given class.
+ void AddVBaseOffsets(const CXXRecordDecl *Base, uint64_t OffsetInLayoutClass);
+
+ /// getCurrentOffsetOffset - Get the current vcall or vbase offset offset in
+ /// bytes, relative to the vtable address point.
+ int64_t getCurrentOffsetOffset() const;
+
+public:
+ VCallAndVBaseOffsetBuilder(const CXXRecordDecl *MostDerivedClass,
+ const CXXRecordDecl *LayoutClass,
+ const FinalOverriders *Overriders,
+ BaseSubobject Base, bool BaseIsVirtual,
+ uint64_t OffsetInLayoutClass)
+ : MostDerivedClass(MostDerivedClass), LayoutClass(LayoutClass),
+ Context(MostDerivedClass->getASTContext()), Overriders(Overriders) {
+
+ // Add vcall and vbase offsets.
+ AddVCallAndVBaseOffsets(Base, BaseIsVirtual, OffsetInLayoutClass);
+ }
+
+ /// Methods for iterating over the components.
+ typedef VTableComponentVectorTy::const_reverse_iterator const_iterator;
+ const_iterator components_begin() const { return Components.rbegin(); }
+ const_iterator components_end() const { return Components.rend(); }
+
+ const VCallOffsetMap &getVCallOffsets() const { return VCallOffsets; }
+ const VBaseOffsetOffsetsMapTy &getVBaseOffsetOffsets() const {
+ return VBaseOffsetOffsets;
+ }
+};
+
+void
+VCallAndVBaseOffsetBuilder::AddVCallAndVBaseOffsets(BaseSubobject Base,
+ bool BaseIsVirtual,
+ uint64_t RealBaseOffset) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base.getBase());
+
+ // Itanium C++ ABI 2.5.2:
+ // ..in classes sharing a virtual table with a primary base class, the vcall
+ // and vbase offsets added by the derived class all come before the vcall
+ // and vbase offsets required by the base class, so that the latter may be
+ // laid out as required by the base class without regard to additions from
+ // the derived class(es).
+
+ // (Since we're emitting the vcall and vbase offsets in reverse order, we'll
+ // emit them for the primary base first).
+ if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
+ bool PrimaryBaseIsVirtual = Layout.getPrimaryBaseWasVirtual();
+
+ uint64_t PrimaryBaseOffset;
+
+ // Get the base offset of the primary base.
+ if (PrimaryBaseIsVirtual) {
+ assert(Layout.getVBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary vbase should have a zero offset!");
+
+ const ASTRecordLayout &MostDerivedClassLayout =
+ Context.getASTRecordLayout(MostDerivedClass);
+
+ PrimaryBaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
+ } else {
+ assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary base should have a zero offset!");
+
+ PrimaryBaseOffset = Base.getBaseOffset();
+ }
+
+ AddVCallAndVBaseOffsets(BaseSubobject(PrimaryBase, PrimaryBaseOffset),
+ PrimaryBaseIsVirtual, RealBaseOffset);
+ }
+
+ AddVBaseOffsets(Base.getBase(), RealBaseOffset);
+
+ // We only want to add vcall offsets for virtual bases.
+ if (BaseIsVirtual)
+ AddVCallOffsets(Base, RealBaseOffset);
+}
+
+int64_t VCallAndVBaseOffsetBuilder::getCurrentOffsetOffset() const {
+  // OffsetIndex is the index of this vcall or vbase offset, relative to the
+  // vtable address point. (We subtract 3 to account for the information just
+  // above the address point: the RTTI info, the offset to top, and the
+  // vcall or vbase offset itself).
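+  // For example, assuming 8-byte pointers, the first vcall or vbase offset
+  // emitted ends up 24 bytes below the address point, the next one 32 bytes
+  // below, and so on.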
+ int64_t OffsetIndex = -(int64_t)(3 + Components.size());
+
+ // FIXME: We shouldn't use / 8 here.
+ int64_t OffsetOffset = OffsetIndex *
+ (int64_t)Context.Target.getPointerWidth(0) / 8;
+
+ return OffsetOffset;
+}
+
+void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
+ uint64_t VBaseOffset) {
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ // Handle the primary base first.
+ // We only want to add vcall offsets if the base is non-virtual; a virtual
+ // primary base will have its vcall and vbase offsets emitted already.
+ if (PrimaryBase && !Layout.getPrimaryBaseWasVirtual()) {
+ // Get the base offset of the primary base.
+ assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary base should have a zero offset!");
+
+ AddVCallOffsets(BaseSubobject(PrimaryBase, Base.getBaseOffset()),
+ VBaseOffset);
+ }
+
+ // Add the vcall offsets.
+ for (CXXRecordDecl::method_iterator I = RD->method_begin(),
+ E = RD->method_end(); I != E; ++I) {
+ const CXXMethodDecl *MD = *I;
+
+ if (!MD->isVirtual())
+ continue;
+
+ int64_t OffsetOffset = getCurrentOffsetOffset();
+
+ // Don't add a vcall offset if we already have one for this member function
+ // signature.
+ if (!VCallOffsets.AddVCallOffset(MD, OffsetOffset))
+ continue;
+
+ int64_t Offset = 0;
+
+ if (Overriders) {
+ // Get the final overrider.
+ FinalOverriders::OverriderInfo Overrider =
+ Overriders->getOverrider(MD, Base.getBaseOffset());
+
+ /// The vcall offset is the offset from the virtual base to the object
+ /// where the function was overridden.
+ // FIXME: We should not use / 8 here.
+ Offset = (int64_t)(Overrider.Offset - VBaseOffset) / 8;
+ }
+
+ Components.push_back(VTableComponent::MakeVCallOffset(Offset));
+ }
+
+ // And iterate over all non-virtual bases (ignoring the primary base).
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ if (BaseDecl == PrimaryBase)
+ continue;
+
+ // Get the base offset of this base.
+ uint64_t BaseOffset = Base.getBaseOffset() +
+ Layout.getBaseClassOffset(BaseDecl);
+
+ AddVCallOffsets(BaseSubobject(BaseDecl, BaseOffset), VBaseOffset);
+ }
+}
+
+void VCallAndVBaseOffsetBuilder::AddVBaseOffsets(const CXXRecordDecl *RD,
+ uint64_t OffsetInLayoutClass) {
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ // Add vbase offsets.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Check if this is a virtual base that we haven't visited before.
+ if (I->isVirtual() && VisitedVirtualBases.insert(BaseDecl)) {
+ // FIXME: We shouldn't use / 8 here.
+ int64_t Offset =
+ (int64_t)(LayoutClassLayout.getVBaseClassOffset(BaseDecl) -
+ OffsetInLayoutClass) / 8;
+
+ // Add the vbase offset offset.
+ assert(!VBaseOffsetOffsets.count(BaseDecl) &&
+ "vbase offset offset already exists!");
+
+ int64_t VBaseOffsetOffset = getCurrentOffsetOffset();
+ VBaseOffsetOffsets.insert(std::make_pair(BaseDecl, VBaseOffsetOffset));
+
+ Components.push_back(VTableComponent::MakeVBaseOffset(Offset));
+ }
+
+ // Check the base class looking for more vbase offsets.
+ AddVBaseOffsets(BaseDecl, OffsetInLayoutClass);
+ }
+}
+
+/// VTableBuilder - Class for building vtable layout information.
+class VTableBuilder {
+public:
+ /// PrimaryBasesSetVectorTy - A set vector of direct and indirect
+ /// primary bases.
+ typedef llvm::SmallSetVector<const CXXRecordDecl *, 8>
+ PrimaryBasesSetVectorTy;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, int64_t>
+ VBaseOffsetOffsetsMapTy;
+
+ typedef llvm::DenseMap<BaseSubobject, uint64_t>
+ AddressPointsMapTy;
+
+private:
+ /// VTables - Global vtable information.
+ CodeGenVTables &VTables;
+
+ /// MostDerivedClass - The most derived class for which we're building this
+ /// vtable.
+ const CXXRecordDecl *MostDerivedClass;
+
+ /// MostDerivedClassOffset - If we're building a construction vtable, this
+ /// holds the offset from the layout class to the most derived class.
+ const uint64_t MostDerivedClassOffset;
+
+ /// MostDerivedClassIsVirtual - Whether the most derived class is a virtual
+ /// base. (This only makes sense when building a construction vtable).
+ bool MostDerivedClassIsVirtual;
+
+ /// LayoutClass - The class we're using for layout information. Will be
+ /// different than the most derived class if we're building a construction
+ /// vtable.
+ const CXXRecordDecl *LayoutClass;
+
+ /// Context - The ASTContext which we will use for layout information.
+ ASTContext &Context;
+
+ /// FinalOverriders - The final overriders of the most derived class.
+ const FinalOverriders Overriders;
+
+ /// VCallOffsetsForVBases - Keeps track of vcall offsets for the virtual
+ /// bases in this vtable.
+ llvm::DenseMap<const CXXRecordDecl *, VCallOffsetMap> VCallOffsetsForVBases;
+
+ /// VBaseOffsetOffsets - Contains the offsets of the virtual base offsets for
+ /// the most derived class.
+ VBaseOffsetOffsetsMapTy VBaseOffsetOffsets;
+
+ /// Components - The components of the vtable being built.
+ llvm::SmallVector<VTableComponent, 64> Components;
+
+ /// AddressPoints - Address points for the vtable being built.
+ AddressPointsMapTy AddressPoints;
+
+ /// MethodInfo - Contains information about a method in a vtable.
+  /// (Used for computing 'this' pointer adjustment thunks.)
+ struct MethodInfo {
+ /// BaseOffset - The base offset of this method.
+ const uint64_t BaseOffset;
+
+ /// BaseOffsetInLayoutClass - The base offset in the layout class of this
+ /// method.
+ const uint64_t BaseOffsetInLayoutClass;
+
+ /// VTableIndex - The index in the vtable that this method has.
+ /// (For destructors, this is the index of the complete destructor).
+ const uint64_t VTableIndex;
+
+ MethodInfo(uint64_t BaseOffset, uint64_t BaseOffsetInLayoutClass,
+ uint64_t VTableIndex)
+ : BaseOffset(BaseOffset),
+ BaseOffsetInLayoutClass(BaseOffsetInLayoutClass),
+ VTableIndex(VTableIndex) { }
+
+ MethodInfo() : BaseOffset(0), BaseOffsetInLayoutClass(0), VTableIndex(0) { }
+ };
+
+ typedef llvm::DenseMap<const CXXMethodDecl *, MethodInfo> MethodInfoMapTy;
+
+ /// MethodInfoMap - The information for all methods in the vtable we're
+ /// currently building.
+ MethodInfoMapTy MethodInfoMap;
+
+ typedef llvm::DenseMap<uint64_t, ThunkInfo> VTableThunksMapTy;
+
+ /// VTableThunks - The thunks by vtable index in the vtable currently being
+ /// built.
+ VTableThunksMapTy VTableThunks;
+
+ typedef llvm::SmallVector<ThunkInfo, 1> ThunkInfoVectorTy;
+ typedef llvm::DenseMap<const CXXMethodDecl *, ThunkInfoVectorTy> ThunksMapTy;
+
+ /// Thunks - A map that contains all the thunks needed for all methods in the
+ /// most derived class for which the vtable is currently being built.
+ ThunksMapTy Thunks;
+
+ /// AddThunk - Add a thunk for the given method.
+ void AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk);
+
+ /// ComputeThisAdjustments - Compute the 'this' pointer adjustments for the
+ /// part of the vtable we're currently building.
+ void ComputeThisAdjustments();
+
+ typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
+
+ /// PrimaryVirtualBases - All known virtual bases who are a primary base of
+ /// some other base.
+ VisitedVirtualBasesSetTy PrimaryVirtualBases;
+
+ /// ComputeReturnAdjustment - Compute the return adjustment given a return
+ /// adjustment base offset.
+ ReturnAdjustment ComputeReturnAdjustment(BaseOffset Offset);
+
+ /// ComputeThisAdjustmentBaseOffset - Compute the base offset for adjusting
+ /// the 'this' pointer from the base subobject to the derived subobject.
+ BaseOffset ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
+ BaseSubobject Derived) const;
+
+ /// ComputeThisAdjustment - Compute the 'this' pointer adjustment for the
+ /// given virtual member function, its offset in the layout class and its
+ /// final overrider.
+ ThisAdjustment
+ ComputeThisAdjustment(const CXXMethodDecl *MD,
+ uint64_t BaseOffsetInLayoutClass,
+ FinalOverriders::OverriderInfo Overrider);
+
+ /// AddMethod - Add a single virtual member function to the vtable
+ /// components vector.
+ void AddMethod(const CXXMethodDecl *MD, ReturnAdjustment ReturnAdjustment);
+
+ /// IsOverriderUsed - Returns whether the overrider will ever be used in this
+ /// part of the vtable.
+ ///
+ /// Itanium C++ ABI 2.5.2:
+ ///
+ /// struct A { virtual void f(); };
+ /// struct B : virtual public A { int i; };
+ /// struct C : virtual public A { int j; };
+ /// struct D : public B, public C {};
+ ///
+ /// When B and C are declared, A is a primary base in each case, so although
+ /// vcall offsets are allocated in the A-in-B and A-in-C vtables, no this
+ /// adjustment is required and no thunk is generated. However, inside D
+ /// objects, A is no longer a primary base of C, so if we allowed calls to
+ /// C::f() to use the copy of A's vtable in the C subobject, we would need
+ /// to adjust this from C* to B::A*, which would require a third-party
+ /// thunk. Since we require that a call to C::f() first convert to A*,
+ /// C-in-D's copy of A's vtable is never referenced, so this is not
+ /// necessary.
+ bool IsOverriderUsed(const CXXMethodDecl *Overrider,
+ uint64_t BaseOffsetInLayoutClass,
+ const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
+ uint64_t FirstBaseOffsetInLayoutClass) const;
+
+
+ /// AddMethods - Add the methods of this base subobject and all its
+ /// primary bases to the vtable components vector.
+ void AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
+ const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
+ uint64_t FirstBaseOffsetInLayoutClass,
+ PrimaryBasesSetVectorTy &PrimaryBases);
+
+  /// LayoutVTable - Layout the vtable for the given base class, including its
+  /// secondary vtables and any vtables for virtual bases.
+ void LayoutVTable();
+
+ /// LayoutPrimaryAndSecondaryVTables - Layout the primary vtable for the
+ /// given base subobject, as well as all its secondary vtables.
+ ///
+ /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
+ /// or a direct or indirect base of a virtual base.
+ ///
+ /// \param BaseIsVirtualInLayoutClass - Whether the base subobject is virtual
+ /// in the layout class.
+ void LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ bool BaseIsVirtualInLayoutClass,
+ uint64_t OffsetInLayoutClass);
+
+ /// LayoutSecondaryVTables - Layout the secondary vtables for the given base
+ /// subobject.
+ ///
+ /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
+ /// or a direct or indirect base of a virtual base.
+ void LayoutSecondaryVTables(BaseSubobject Base, bool BaseIsMorallyVirtual,
+ uint64_t OffsetInLayoutClass);
+
+ /// DeterminePrimaryVirtualBases - Determine the primary virtual bases in this
+ /// class hierarchy.
+ void DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
+ uint64_t OffsetInLayoutClass,
+ VisitedVirtualBasesSetTy &VBases);
+
+ /// LayoutVTablesForVirtualBases - Layout vtables for all virtual bases of the
+ /// given base (excluding any primary bases).
+ void LayoutVTablesForVirtualBases(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases);
+
+  /// isBuildingConstructorVTable - Return whether this vtable builder is
+  /// building a construction vtable.
+ bool isBuildingConstructorVTable() const {
+ return MostDerivedClass != LayoutClass;
+ }
+
+public:
+ VTableBuilder(CodeGenVTables &VTables, const CXXRecordDecl *MostDerivedClass,
+ uint64_t MostDerivedClassOffset, bool MostDerivedClassIsVirtual,
+ const CXXRecordDecl *LayoutClass)
+ : VTables(VTables), MostDerivedClass(MostDerivedClass),
+ MostDerivedClassOffset(MostDerivedClassOffset),
+ MostDerivedClassIsVirtual(MostDerivedClassIsVirtual),
+ LayoutClass(LayoutClass), Context(MostDerivedClass->getASTContext()),
+ Overriders(MostDerivedClass, MostDerivedClassOffset, LayoutClass) {
+
+ LayoutVTable();
+ }
+
+ ThunksMapTy::const_iterator thunks_begin() const {
+ return Thunks.begin();
+ }
+
+ ThunksMapTy::const_iterator thunks_end() const {
+ return Thunks.end();
+ }
+
+ const VBaseOffsetOffsetsMapTy &getVBaseOffsetOffsets() const {
+ return VBaseOffsetOffsets;
+ }
+
+ /// getNumVTableComponents - Return the number of components in the vtable
+ /// currently built.
+ uint64_t getNumVTableComponents() const {
+ return Components.size();
+ }
+
+ const uint64_t *vtable_components_data_begin() const {
+ return reinterpret_cast<const uint64_t *>(Components.begin());
+ }
+
+ const uint64_t *vtable_components_data_end() const {
+ return reinterpret_cast<const uint64_t *>(Components.end());
+ }
+
+ AddressPointsMapTy::const_iterator address_points_begin() const {
+ return AddressPoints.begin();
+ }
+
+ AddressPointsMapTy::const_iterator address_points_end() const {
+ return AddressPoints.end();
+ }
+
+ VTableThunksMapTy::const_iterator vtable_thunks_begin() const {
+ return VTableThunks.begin();
+ }
+
+ VTableThunksMapTy::const_iterator vtable_thunks_end() const {
+ return VTableThunks.end();
+ }
+
+ /// dumpLayout - Dump the vtable layout.
+ void dumpLayout(llvm::raw_ostream&);
+};
+
+void VTableBuilder::AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk) {
+ assert(!isBuildingConstructorVTable() &&
+ "Can't add thunks for construction vtable");
+
+ llvm::SmallVector<ThunkInfo, 1> &ThunksVector = Thunks[MD];
+
+ // Check if we have this thunk already.
+ if (std::find(ThunksVector.begin(), ThunksVector.end(), Thunk) !=
+ ThunksVector.end())
+ return;
+
+ ThunksVector.push_back(Thunk);
+}
+
+typedef llvm::SmallPtrSet<const CXXMethodDecl *, 8> OverriddenMethodsSetTy;
+
+/// ComputeAllOverriddenMethods - Given a method decl, compute the set of all
+/// methods it overrides, both directly and indirectly.
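+/// For example, given C::f overriding B::f, which in turn overrides A::f,
+/// calling this on C::f adds both B::f and A::f to OverriddenMethods.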
+static void
+ComputeAllOverriddenMethods(const CXXMethodDecl *MD,
+ OverriddenMethodsSetTy& OverriddenMethods) {
+ assert(MD->isVirtual() && "Method is not virtual!");
+
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods(); I != E; ++I) {
+ const CXXMethodDecl *OverriddenMD = *I;
+
+ OverriddenMethods.insert(OverriddenMD);
+
+ ComputeAllOverriddenMethods(OverriddenMD, OverriddenMethods);
+ }
+}
+
+void VTableBuilder::ComputeThisAdjustments() {
+ // Now go through the method info map and see if any of the methods need
+ // 'this' pointer adjustments.
+ for (MethodInfoMapTy::const_iterator I = MethodInfoMap.begin(),
+ E = MethodInfoMap.end(); I != E; ++I) {
+ const CXXMethodDecl *MD = I->first;
+ const MethodInfo &MethodInfo = I->second;
+
+ // Ignore adjustments for unused function pointers.
+ uint64_t VTableIndex = MethodInfo.VTableIndex;
+ if (Components[VTableIndex].getKind() ==
+ VTableComponent::CK_UnusedFunctionPointer)
+ continue;
+
+ // Get the final overrider for this method.
+ FinalOverriders::OverriderInfo Overrider =
+ Overriders.getOverrider(MD, MethodInfo.BaseOffset);
+
+ // Check if we need an adjustment at all.
+ if (MethodInfo.BaseOffsetInLayoutClass == Overrider.Offset) {
+ // When a return thunk is needed by a derived class that overrides a
+ // virtual base, gcc uses a virtual 'this' adjustment as well.
+ // While the thunk itself might be needed by vtables in subclasses or
+ // in construction vtables, there doesn't seem to be a reason for using
+ // the thunk in this vtable. Still, we do so to match gcc.
+ if (VTableThunks.lookup(VTableIndex).Return.isEmpty())
+ continue;
+ }
+
+ ThisAdjustment ThisAdjustment =
+ ComputeThisAdjustment(MD, MethodInfo.BaseOffsetInLayoutClass, Overrider);
+
+ if (ThisAdjustment.isEmpty())
+ continue;
+
+ // Add it.
+ VTableThunks[VTableIndex].This = ThisAdjustment;
+
+ if (isa<CXXDestructorDecl>(MD)) {
+ // Add an adjustment for the deleting destructor as well.
+ VTableThunks[VTableIndex + 1].This = ThisAdjustment;
+ }
+ }
+
+ /// Clear the method info map.
+ MethodInfoMap.clear();
+
+ if (isBuildingConstructorVTable()) {
+ // We don't need to store thunk information for construction vtables.
+ return;
+ }
+
+ for (VTableThunksMapTy::const_iterator I = VTableThunks.begin(),
+ E = VTableThunks.end(); I != E; ++I) {
+ const VTableComponent &Component = Components[I->first];
+ const ThunkInfo &Thunk = I->second;
+ const CXXMethodDecl *MD;
+
+ switch (Component.getKind()) {
+ default:
+ llvm_unreachable("Unexpected vtable component kind!");
+ case VTableComponent::CK_FunctionPointer:
+ MD = Component.getFunctionDecl();
+ break;
+ case VTableComponent::CK_CompleteDtorPointer:
+ MD = Component.getDestructorDecl();
+ break;
+ case VTableComponent::CK_DeletingDtorPointer:
+ // We've already added the thunk when we saw the complete dtor pointer.
+ continue;
+ }
+
+ if (MD->getParent() == MostDerivedClass)
+ AddThunk(MD, Thunk);
+ }
+}
+
+ReturnAdjustment VTableBuilder::ComputeReturnAdjustment(BaseOffset Offset) {
+ ReturnAdjustment Adjustment;
+
+ if (!Offset.isEmpty()) {
+ if (Offset.VirtualBase) {
+ // Get the virtual base offset offset.
+ if (Offset.DerivedClass == MostDerivedClass) {
+ // We can get the offset offset directly from our map.
+ Adjustment.VBaseOffsetOffset =
+ VBaseOffsetOffsets.lookup(Offset.VirtualBase);
+ } else {
+ Adjustment.VBaseOffsetOffset =
+ VTables.getVirtualBaseOffsetOffset(Offset.DerivedClass,
+ Offset.VirtualBase);
+ }
+ }
+
+ Adjustment.NonVirtual = Offset.NonVirtualOffset;
+ }
+
+ return Adjustment;
+}
+
+BaseOffset
+VTableBuilder::ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
+ BaseSubobject Derived) const {
+ const CXXRecordDecl *BaseRD = Base.getBase();
+ const CXXRecordDecl *DerivedRD = Derived.getBase();
+
+ CXXBasePaths Paths(/*FindAmbiguities=*/true,
+ /*RecordPaths=*/true, /*DetectVirtual=*/true);
+
+ if (!const_cast<CXXRecordDecl *>(DerivedRD)->
+ isDerivedFrom(const_cast<CXXRecordDecl *>(BaseRD), Paths)) {
+ assert(false && "Class must be derived from the passed in base class!");
+ return BaseOffset();
+ }
+
+ // We have to go through all the paths, and see which one leads us to the
+ // right base subobject.
+ for (CXXBasePaths::const_paths_iterator I = Paths.begin(), E = Paths.end();
+ I != E; ++I) {
+ BaseOffset Offset = ComputeBaseOffset(Context, DerivedRD, *I);
+
+ // FIXME: Should not use * 8 here.
+ uint64_t OffsetToBaseSubobject = Offset.NonVirtualOffset * 8;
+
+ if (Offset.VirtualBase) {
+ // If we have a virtual base class, the non-virtual offset is relative
+ // to the virtual base class offset.
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+      // Get the virtual base offset, relative to the layout class.
+ OffsetToBaseSubobject +=
+ LayoutClassLayout.getVBaseClassOffset(Offset.VirtualBase);
+ } else {
+ // Otherwise, the non-virtual offset is relative to the derived class
+ // offset.
+ OffsetToBaseSubobject += Derived.getBaseOffset();
+ }
+
+ // Check if this path gives us the right base subobject.
+ if (OffsetToBaseSubobject == Base.getBaseOffset()) {
+ // Since we're going from the base class _to_ the derived class, we'll
+ // invert the non-virtual offset here.
+ Offset.NonVirtualOffset = -Offset.NonVirtualOffset;
+ return Offset;
+ }
+ }
+
+ return BaseOffset();
+}
+
+ThisAdjustment
+VTableBuilder::ComputeThisAdjustment(const CXXMethodDecl *MD,
+ uint64_t BaseOffsetInLayoutClass,
+ FinalOverriders::OverriderInfo Overrider) {
+ // Ignore adjustments for pure virtual member functions.
+ if (Overrider.Method->isPure())
+ return ThisAdjustment();
+
+ BaseSubobject OverriddenBaseSubobject(MD->getParent(),
+ BaseOffsetInLayoutClass);
+
+ BaseSubobject OverriderBaseSubobject(Overrider.Method->getParent(),
+ Overrider.Offset);
+
+ // Compute the adjustment offset.
+ BaseOffset Offset = ComputeThisAdjustmentBaseOffset(OverriddenBaseSubobject,
+ OverriderBaseSubobject);
+ if (Offset.isEmpty())
+ return ThisAdjustment();
+
+ ThisAdjustment Adjustment;
+
+ if (Offset.VirtualBase) {
+ // Get the vcall offset map for this virtual base.
+ VCallOffsetMap &VCallOffsets = VCallOffsetsForVBases[Offset.VirtualBase];
+
+ if (VCallOffsets.empty()) {
+ // We don't have vcall offsets for this virtual base, go ahead and
+ // build them.
+ VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, MostDerivedClass,
+ /*FinalOverriders=*/0,
+ BaseSubobject(Offset.VirtualBase, 0),
+ /*BaseIsVirtual=*/true,
+ /*OffsetInLayoutClass=*/0);
+
+ VCallOffsets = Builder.getVCallOffsets();
+ }
+
+ Adjustment.VCallOffsetOffset = VCallOffsets.getVCallOffsetOffset(MD);
+ }
+
+ // Set the non-virtual part of the adjustment.
+ Adjustment.NonVirtual = Offset.NonVirtualOffset;
+
+ return Adjustment;
+}
+
+void
+VTableBuilder::AddMethod(const CXXMethodDecl *MD,
+ ReturnAdjustment ReturnAdjustment) {
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ assert(ReturnAdjustment.isEmpty() &&
+ "Destructor can't have return adjustment!");
+
+ // Add both the complete destructor and the deleting destructor.
+ Components.push_back(VTableComponent::MakeCompleteDtor(DD));
+ Components.push_back(VTableComponent::MakeDeletingDtor(DD));
+ } else {
+ // Add the return adjustment if necessary.
+ if (!ReturnAdjustment.isEmpty())
+ VTableThunks[Components.size()].Return = ReturnAdjustment;
+
+ // Add the function.
+ Components.push_back(VTableComponent::MakeFunction(MD));
+ }
+}
+
+/// OverridesIndirectMethodInBases - Return whether the given member function
+/// overrides any methods in the set of given bases.
+/// Unlike OverridesMethodInBase, this checks "overriders of overriders".
+/// For example, if we have:
+///
+/// struct A { virtual void f(); }
+/// struct B : A { virtual void f(); }
+/// struct C : B { virtual void f(); }
+///
+/// OverridesIndirectMethodInBase will return true if given C::f as the method
+/// and { A } as the set of bases.
+static bool
+OverridesIndirectMethodInBases(const CXXMethodDecl *MD,
+ VTableBuilder::PrimaryBasesSetVectorTy &Bases) {
+ if (Bases.count(MD->getParent()))
+ return true;
+
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods(); I != E; ++I) {
+ const CXXMethodDecl *OverriddenMD = *I;
+
+ // Check "indirect overriders".
+ if (OverridesIndirectMethodInBases(OverriddenMD, Bases))
+ return true;
+ }
+
+ return false;
+}
+
+bool
+VTableBuilder::IsOverriderUsed(const CXXMethodDecl *Overrider,
+ uint64_t BaseOffsetInLayoutClass,
+ const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
+ uint64_t FirstBaseOffsetInLayoutClass) const {
+ // If the base and the first base in the primary base chain have the same
+ // offsets, then this overrider will be used.
+ if (BaseOffsetInLayoutClass == FirstBaseOffsetInLayoutClass)
+ return true;
+
+ // We know now that Base (or a direct or indirect base of it) is a primary
+ // base in part of the class hierarchy, but not a primary base in the most
+ // derived class.
+
+ // If the overrider is the first base in the primary base chain, we know
+ // that the overrider will be used.
+ if (Overrider->getParent() == FirstBaseInPrimaryBaseChain)
+ return true;
+
+ VTableBuilder::PrimaryBasesSetVectorTy PrimaryBases;
+
+ const CXXRecordDecl *RD = FirstBaseInPrimaryBaseChain;
+ PrimaryBases.insert(RD);
+
+ // Now traverse the base chain, starting with the first base, until we find
+ // the base that is no longer a primary base.
+ while (true) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ if (!PrimaryBase)
+ break;
+
+ if (Layout.getPrimaryBaseWasVirtual()) {
+ assert(Layout.getVBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary base should always be at offset 0!");
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+      // Now check if this is the primary base that is not a primary base in
+      // the layout class.
+ if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
+ FirstBaseOffsetInLayoutClass) {
+ // We found it, stop walking the chain.
+ break;
+ }
+ } else {
+ assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary base should always be at offset 0!");
+ }
+
+ if (!PrimaryBases.insert(PrimaryBase))
+ assert(false && "Found a duplicate primary base!");
+
+ RD = PrimaryBase;
+ }
+
+ // If the final overrider is an override of one of the primary bases,
+ // then we know that it will be used.
+ return OverridesIndirectMethodInBases(Overrider, PrimaryBases);
+}
+
+/// FindNearestOverriddenMethod - Given a method, returns the overridden method
+/// from the nearest base. Returns null if no method was found.
+static const CXXMethodDecl *
+FindNearestOverriddenMethod(const CXXMethodDecl *MD,
+ VTableBuilder::PrimaryBasesSetVectorTy &Bases) {
+ OverriddenMethodsSetTy OverriddenMethods;
+ ComputeAllOverriddenMethods(MD, OverriddenMethods);
+
+ for (int I = Bases.size(), E = 0; I != E; --I) {
+ const CXXRecordDecl *PrimaryBase = Bases[I - 1];
+
+ // Now check the overriden methods.
+ for (OverriddenMethodsSetTy::const_iterator I = OverriddenMethods.begin(),
+ E = OverriddenMethods.end(); I != E; ++I) {
+ const CXXMethodDecl *OverriddenMD = *I;
+
+ // We found our overridden method.
+ if (OverriddenMD->getParent() == PrimaryBase)
+ return OverriddenMD;
+ }
+ }
+
+ return 0;
+}
+
+void
+VTableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
+ const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
+ uint64_t FirstBaseOffsetInLayoutClass,
+ PrimaryBasesSetVectorTy &PrimaryBases) {
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
+ uint64_t PrimaryBaseOffset;
+ uint64_t PrimaryBaseOffsetInLayoutClass;
+ if (Layout.getPrimaryBaseWasVirtual()) {
+ assert(Layout.getVBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary vbase should have a zero offset!");
+
+ const ASTRecordLayout &MostDerivedClassLayout =
+ Context.getASTRecordLayout(MostDerivedClass);
+
+ PrimaryBaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ PrimaryBaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(PrimaryBase);
+ } else {
+ assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary base should have a zero offset!");
+
+ PrimaryBaseOffset = Base.getBaseOffset();
+ PrimaryBaseOffsetInLayoutClass = BaseOffsetInLayoutClass;
+ }
+
+ AddMethods(BaseSubobject(PrimaryBase, PrimaryBaseOffset),
+ PrimaryBaseOffsetInLayoutClass, FirstBaseInPrimaryBaseChain,
+ FirstBaseOffsetInLayoutClass, PrimaryBases);
+
+ if (!PrimaryBases.insert(PrimaryBase))
+ assert(false && "Found a duplicate primary base!");
+ }
+
+ // Now go through all virtual member functions and add them.
+ for (CXXRecordDecl::method_iterator I = RD->method_begin(),
+ E = RD->method_end(); I != E; ++I) {
+ const CXXMethodDecl *MD = *I;
+
+ if (!MD->isVirtual())
+ continue;
+
+ // Get the final overrider.
+ FinalOverriders::OverriderInfo Overrider =
+ Overriders.getOverrider(MD, Base.getBaseOffset());
+
+ // Check if this virtual member function overrides a method in a primary
+ // base. If this is the case, and the return type doesn't require adjustment
+ // then we can just use the member function from the primary base.
+ if (const CXXMethodDecl *OverriddenMD =
+ FindNearestOverriddenMethod(MD, PrimaryBases)) {
+ if (ComputeReturnAdjustmentBaseOffset(Context, MD,
+ OverriddenMD).isEmpty()) {
+ // Replace the method info of the overridden method with our own
+ // method.
+ assert(MethodInfoMap.count(OverriddenMD) &&
+ "Did not find the overridden method!");
+ MethodInfo &OverriddenMethodInfo = MethodInfoMap[OverriddenMD];
+
+ MethodInfo MethodInfo(Base.getBaseOffset(),
+ BaseOffsetInLayoutClass,
+ OverriddenMethodInfo.VTableIndex);
+
+ assert(!MethodInfoMap.count(MD) &&
+ "Should not have method info for this method yet!");
+
+ MethodInfoMap.insert(std::make_pair(MD, MethodInfo));
+ MethodInfoMap.erase(OverriddenMD);
+
+ // If the overridden method exists in a virtual base class or a direct
+ // or indirect base class of a virtual base class, we need to emit a
+ // thunk if we ever have a class hierarchy where the base class is not
+ // a primary base in the complete object.
+ if (!isBuildingConstructorVTable() && OverriddenMD != MD) {
+ // Compute the this adjustment.
+ ThisAdjustment ThisAdjustment =
+ ComputeThisAdjustment(OverriddenMD, BaseOffsetInLayoutClass,
+ Overrider);
+
+ if (ThisAdjustment.VCallOffsetOffset &&
+ Overrider.Method->getParent() == MostDerivedClass) {
+ // This is a virtual thunk for the most derived class, add it.
+ AddThunk(Overrider.Method,
+ ThunkInfo(ThisAdjustment, ReturnAdjustment()));
+ }
+ }
+
+ continue;
+ }
+ }
+
+ // Insert the method info for this method.
+ MethodInfo MethodInfo(Base.getBaseOffset(), BaseOffsetInLayoutClass,
+ Components.size());
+
+ assert(!MethodInfoMap.count(MD) &&
+ "Should not have method info for this method yet!");
+ MethodInfoMap.insert(std::make_pair(MD, MethodInfo));
+
+ // Check if this overrider is going to be used.
+ const CXXMethodDecl *OverriderMD = Overrider.Method;
+ if (!IsOverriderUsed(OverriderMD, BaseOffsetInLayoutClass,
+ FirstBaseInPrimaryBaseChain,
+ FirstBaseOffsetInLayoutClass)) {
+ Components.push_back(VTableComponent::MakeUnusedFunction(OverriderMD));
+ continue;
+ }
+
+ // Check if this overrider needs a return adjustment.
+ // We don't want to do this for pure virtual member functions.
+ BaseOffset ReturnAdjustmentOffset;
+ if (!OverriderMD->isPure()) {
+ ReturnAdjustmentOffset =
+ ComputeReturnAdjustmentBaseOffset(Context, OverriderMD, MD);
+ }
+
+ ReturnAdjustment ReturnAdjustment =
+ ComputeReturnAdjustment(ReturnAdjustmentOffset);
+
+ AddMethod(Overrider.Method, ReturnAdjustment);
+ }
+}
+
+void VTableBuilder::LayoutVTable() {
+ LayoutPrimaryAndSecondaryVTables(BaseSubobject(MostDerivedClass, 0),
+ /*BaseIsMorallyVirtual=*/false,
+ MostDerivedClassIsVirtual,
+ MostDerivedClassOffset);
+
+ VisitedVirtualBasesSetTy VBases;
+
+ // Determine the primary virtual bases.
+ DeterminePrimaryVirtualBases(MostDerivedClass, MostDerivedClassOffset,
+ VBases);
+ VBases.clear();
+
+ LayoutVTablesForVirtualBases(MostDerivedClass, VBases);
+}
+
+void
+VTableBuilder::LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ bool BaseIsVirtualInLayoutClass,
+ uint64_t OffsetInLayoutClass) {
+ assert(Base.getBase()->isDynamicClass() && "class does not have a vtable!");
+
+ // Add vcall and vbase offsets for this vtable.
+ VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, LayoutClass, &Overriders,
+ Base, BaseIsVirtualInLayoutClass,
+ OffsetInLayoutClass);
+ Components.append(Builder.components_begin(), Builder.components_end());
+
+ // Check if we need to add these vcall offsets.
+ if (BaseIsVirtualInLayoutClass && !Builder.getVCallOffsets().empty()) {
+ VCallOffsetMap &VCallOffsets = VCallOffsetsForVBases[Base.getBase()];
+
+ if (VCallOffsets.empty())
+ VCallOffsets = Builder.getVCallOffsets();
+ }
+
+ // If we're laying out the most derived class we want to keep track of the
+ // virtual base class offset offsets.
+ if (Base.getBase() == MostDerivedClass)
+ VBaseOffsetOffsets = Builder.getVBaseOffsetOffsets();
+
+ // Add the offset to top.
+ // FIXME: We should not use / 8 here.
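+  // (Illustrative example: for the full-object vtable, where
+  // MostDerivedClassOffset is 0, a secondary vtable for a base living at byte
+  // offset 16 in the object gets an offset to top of -16.)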
+ int64_t OffsetToTop = -(int64_t)(OffsetInLayoutClass -
+ MostDerivedClassOffset) / 8;
+ Components.push_back(VTableComponent::MakeOffsetToTop(OffsetToTop));
+
+ // Next, add the RTTI.
+ Components.push_back(VTableComponent::MakeRTTI(MostDerivedClass));
+
+ uint64_t AddressPoint = Components.size();
+
+ // Now go through all virtual member functions and add them.
+ PrimaryBasesSetVectorTy PrimaryBases;
+ AddMethods(Base, OffsetInLayoutClass, Base.getBase(), OffsetInLayoutClass,
+ PrimaryBases);
+
+ // Compute 'this' pointer adjustments.
+ ComputeThisAdjustments();
+
+ // Add all address points.
+ const CXXRecordDecl *RD = Base.getBase();
+ while (true) {
+ AddressPoints.insert(std::make_pair(BaseSubobject(RD, OffsetInLayoutClass),
+ AddressPoint));
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ if (!PrimaryBase)
+ break;
+
+ if (Layout.getPrimaryBaseWasVirtual()) {
+ // Check if this virtual primary base is a primary base in the layout
+ // class. If it's not, we don't want to add it.
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
+ OffsetInLayoutClass) {
+ // We don't want to add this class (or any of its primary bases).
+ break;
+ }
+ }
+
+ RD = PrimaryBase;
+ }
+
+ // Layout secondary vtables.
+ LayoutSecondaryVTables(Base, BaseIsMorallyVirtual, OffsetInLayoutClass);
+}
+
+void VTableBuilder::LayoutSecondaryVTables(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ uint64_t OffsetInLayoutClass) {
+ // Itanium C++ ABI 2.5.2:
+ // Following the primary virtual table of a derived class are secondary
+ // virtual tables for each of its proper base classes, except any primary
+ // base(s) with which it shares its primary virtual table.
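+  // For example (illustrative): given 'struct D : B, C { ... };' where B is
+  // D's primary base, D's primary vtable is shared with B and is followed by
+  // a secondary vtable for C.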
+
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ // Ignore virtual bases, we'll emit them later.
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore bases that don't have a vtable.
+ if (!BaseDecl->isDynamicClass())
+ continue;
+
+ if (isBuildingConstructorVTable()) {
+ // Itanium C++ ABI 2.6.4:
+ // Some of the base class subobjects may not need construction virtual
+ // tables, which will therefore not be present in the construction
+ // virtual table group, even though the subobject virtual tables are
+ // present in the main virtual table group for the complete object.
+ if (!BaseIsMorallyVirtual && !BaseDecl->getNumVBases())
+ continue;
+ }
+
+ // Get the base offset of this base.
+ uint64_t RelativeBaseOffset = Layout.getBaseClassOffset(BaseDecl);
+ uint64_t BaseOffset = Base.getBaseOffset() + RelativeBaseOffset;
+
+ uint64_t BaseOffsetInLayoutClass = OffsetInLayoutClass + RelativeBaseOffset;
+
+ // Don't emit a secondary vtable for a primary base. We might however want
+ // to emit secondary vtables for other bases of this base.
+ if (BaseDecl == PrimaryBase) {
+ LayoutSecondaryVTables(BaseSubobject(BaseDecl, BaseOffset),
+ BaseIsMorallyVirtual, BaseOffsetInLayoutClass);
+ continue;
+ }
+
+ // Layout the primary vtable (and any secondary vtables) for this base.
+ LayoutPrimaryAndSecondaryVTables(BaseSubobject(BaseDecl, BaseOffset),
+ BaseIsMorallyVirtual,
+ /*BaseIsVirtualInLayoutClass=*/false,
+ BaseOffsetInLayoutClass);
+ }
+}
+
+void
+VTableBuilder::DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
+ uint64_t OffsetInLayoutClass,
+ VisitedVirtualBasesSetTy &VBases) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Check if this base has a primary base.
+ if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
+
+ // Check if it's virtual.
+ if (Layout.getPrimaryBaseWasVirtual()) {
+ bool IsPrimaryVirtualBase = true;
+
+ if (isBuildingConstructorVTable()) {
+ // Check if the base is actually a primary base in the class we use for
+ // layout.
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ uint64_t PrimaryBaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(PrimaryBase);
+
+ // We know that the base is not a primary base in the layout class if
+ // the base offsets are different.
+ if (PrimaryBaseOffsetInLayoutClass != OffsetInLayoutClass)
+ IsPrimaryVirtualBase = false;
+ }
+
+ if (IsPrimaryVirtualBase)
+ PrimaryVirtualBases.insert(PrimaryBase);
+ }
+ }
+
+ // Traverse bases, looking for more primary virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ uint64_t BaseOffsetInLayoutClass;
+
+ if (I->isVirtual()) {
+ if (!VBases.insert(BaseDecl))
+ continue;
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ BaseOffsetInLayoutClass = LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+ } else {
+ BaseOffsetInLayoutClass =
+ OffsetInLayoutClass + Layout.getBaseClassOffset(BaseDecl);
+ }
+
+ DeterminePrimaryVirtualBases(BaseDecl, BaseOffsetInLayoutClass, VBases);
+ }
+}
+
+void
+VTableBuilder::LayoutVTablesForVirtualBases(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases) {
+ // Itanium C++ ABI 2.5.2:
+ // Then come the virtual base virtual tables, also in inheritance graph
+ // order, and again excluding primary bases (which share virtual tables with
+ // the classes for which they are primary).
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Check if this base needs a vtable. (If it's virtual, not a primary base
+ // of some other class, and we haven't visited it before).
+ if (I->isVirtual() && BaseDecl->isDynamicClass() &&
+ !PrimaryVirtualBases.count(BaseDecl) && VBases.insert(BaseDecl)) {
+ const ASTRecordLayout &MostDerivedClassLayout =
+ Context.getASTRecordLayout(MostDerivedClass);
+ uint64_t BaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+ uint64_t BaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+
+ LayoutPrimaryAndSecondaryVTables(BaseSubobject(BaseDecl, BaseOffset),
+ /*BaseIsMorallyVirtual=*/true,
+ /*BaseIsVirtualInLayoutClass=*/true,
+ BaseOffsetInLayoutClass);
+ }
+
+ // We only need to check the base for virtual base vtables if it actually
+ // has virtual bases.
+ if (BaseDecl->getNumVBases())
+ LayoutVTablesForVirtualBases(BaseDecl, VBases);
+ }
+}
+
+/// dumpLayout - Dump the vtable layout.
+void VTableBuilder::dumpLayout(llvm::raw_ostream& Out) {
+
+ if (isBuildingConstructorVTable()) {
+ Out << "Construction vtable for ('";
+ Out << MostDerivedClass->getQualifiedNameAsString() << "', ";
+    // FIXME: Don't use / 8.
+ Out << MostDerivedClassOffset / 8 << ") in '";
+ Out << LayoutClass->getQualifiedNameAsString();
+ } else {
+ Out << "Vtable for '";
+ Out << MostDerivedClass->getQualifiedNameAsString();
+ }
+ Out << "' (" << Components.size() << " entries).\n";
+
+ // Iterate through the address points and insert them into a new map where
+ // they are keyed by the index and not the base object.
+ // Since an address point can be shared by multiple subobjects, we use an
+ // STL multimap.
+ std::multimap<uint64_t, BaseSubobject> AddressPointsByIndex;
+ for (AddressPointsMapTy::const_iterator I = AddressPoints.begin(),
+ E = AddressPoints.end(); I != E; ++I) {
+ const BaseSubobject& Base = I->first;
+ uint64_t Index = I->second;
+
+ AddressPointsByIndex.insert(std::make_pair(Index, Base));
+ }
+
+ for (unsigned I = 0, E = Components.size(); I != E; ++I) {
+ uint64_t Index = I;
+
+ Out << llvm::format("%4d | ", I);
+
+ const VTableComponent &Component = Components[I];
+
+ // Dump the component.
+ switch (Component.getKind()) {
+
+ case VTableComponent::CK_VCallOffset:
+ Out << "vcall_offset (" << Component.getVCallOffset() << ")";
+ break;
+
+ case VTableComponent::CK_VBaseOffset:
+ Out << "vbase_offset (" << Component.getVBaseOffset() << ")";
+ break;
+
+ case VTableComponent::CK_OffsetToTop:
+ Out << "offset_to_top (" << Component.getOffsetToTop() << ")";
+ break;
+
+ case VTableComponent::CK_RTTI:
+ Out << Component.getRTTIDecl()->getQualifiedNameAsString() << " RTTI";
+ break;
+
+ case VTableComponent::CK_FunctionPointer: {
+ const CXXMethodDecl *MD = Component.getFunctionDecl();
+
+ std::string Str =
+ PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
+ MD);
+ Out << Str;
+ if (MD->isPure())
+ Out << " [pure]";
+
+ ThunkInfo Thunk = VTableThunks.lookup(I);
+ if (!Thunk.isEmpty()) {
+ // If this function pointer has a return adjustment, dump it.
+ if (!Thunk.Return.isEmpty()) {
+ Out << "\n [return adjustment: ";
+ Out << Thunk.Return.NonVirtual << " non-virtual";
+
+ if (Thunk.Return.VBaseOffsetOffset) {
+ Out << ", " << Thunk.Return.VBaseOffsetOffset;
+ Out << " vbase offset offset";
+ }
+
+ Out << ']';
+ }
+
+ // If this function pointer has a 'this' pointer adjustment, dump it.
+ if (!Thunk.This.isEmpty()) {
+ Out << "\n [this adjustment: ";
+ Out << Thunk.This.NonVirtual << " non-virtual";
+
+ if (Thunk.This.VCallOffsetOffset) {
+ Out << ", " << Thunk.This.VCallOffsetOffset;
+ Out << " vcall offset offset";
+ }
+
+ Out << ']';
+ }
+ }
+
+ break;
+ }
+
+ case VTableComponent::CK_CompleteDtorPointer:
+ case VTableComponent::CK_DeletingDtorPointer: {
+ bool IsComplete =
+ Component.getKind() == VTableComponent::CK_CompleteDtorPointer;
+
+ const CXXDestructorDecl *DD = Component.getDestructorDecl();
+
+ Out << DD->getQualifiedNameAsString();
+ if (IsComplete)
+ Out << "() [complete]";
+ else
+ Out << "() [deleting]";
+
+ if (DD->isPure())
+ Out << " [pure]";
+
+ ThunkInfo Thunk = VTableThunks.lookup(I);
+ if (!Thunk.isEmpty()) {
+ // If this destructor has a 'this' pointer adjustment, dump it.
+ if (!Thunk.This.isEmpty()) {
+ Out << "\n [this adjustment: ";
+ Out << Thunk.This.NonVirtual << " non-virtual";
+
+ if (Thunk.This.VCallOffsetOffset) {
+ Out << ", " << Thunk.This.VCallOffsetOffset;
+ Out << " vcall offset offset";
+ }
+
+ Out << ']';
+ }
+ }
+
+ break;
+ }
+
+ case VTableComponent::CK_UnusedFunctionPointer: {
+ const CXXMethodDecl *MD = Component.getUnusedFunctionDecl();
+
+ std::string Str =
+ PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
+ MD);
+ Out << "[unused] " << Str;
+ if (MD->isPure())
+ Out << " [pure]";
+ }
+
+ }
+
+ Out << '\n';
+
+ // Dump the next address point.
+ uint64_t NextIndex = Index + 1;
+ if (AddressPointsByIndex.count(NextIndex)) {
+ if (AddressPointsByIndex.count(NextIndex) == 1) {
+ const BaseSubobject &Base =
+ AddressPointsByIndex.find(NextIndex)->second;
+
+ // FIXME: Instead of dividing by 8, we should be using CharUnits.
+ Out << " -- (" << Base.getBase()->getQualifiedNameAsString();
+ Out << ", " << Base.getBaseOffset() / 8 << ") vtable address --\n";
+ } else {
+ uint64_t BaseOffset =
+ AddressPointsByIndex.lower_bound(NextIndex)->second.getBaseOffset();
+
+ // We store the class names in a set to get a stable order.
+ std::set<std::string> ClassNames;
+ for (std::multimap<uint64_t, BaseSubobject>::const_iterator I =
+ AddressPointsByIndex.lower_bound(NextIndex), E =
+ AddressPointsByIndex.upper_bound(NextIndex); I != E; ++I) {
+ assert(I->second.getBaseOffset() == BaseOffset &&
+ "Invalid base offset!");
+ const CXXRecordDecl *RD = I->second.getBase();
+ ClassNames.insert(RD->getQualifiedNameAsString());
+ }
+
+ for (std::set<std::string>::const_iterator I = ClassNames.begin(),
+ E = ClassNames.end(); I != E; ++I) {
+ // FIXME: Instead of dividing by 8, we should be using CharUnits.
+ Out << " -- (" << *I;
+ Out << ", " << BaseOffset / 8 << ") vtable address --\n";
+ }
+ }
+ }
+ }
+
+ Out << '\n';
+
+ if (isBuildingConstructorVTable())
+ return;
+
+ if (MostDerivedClass->getNumVBases()) {
+ // We store the virtual base class names and their offsets in a map to get
+ // a stable order.
+
+ std::map<std::string, int64_t> ClassNamesAndOffsets;
+ for (VBaseOffsetOffsetsMapTy::const_iterator I = VBaseOffsetOffsets.begin(),
+ E = VBaseOffsetOffsets.end(); I != E; ++I) {
+ std::string ClassName = I->first->getQualifiedNameAsString();
+ int64_t OffsetOffset = I->second;
+ ClassNamesAndOffsets.insert(std::make_pair(ClassName, OffsetOffset));
+ }
+
+ Out << "Virtual base offset offsets for '";
+ Out << MostDerivedClass->getQualifiedNameAsString() << "' (";
+ Out << ClassNamesAndOffsets.size();
+ Out << (ClassNamesAndOffsets.size() == 1 ? " entry" : " entries") << ").\n";
+
+ for (std::map<std::string, int64_t>::const_iterator I =
+ ClassNamesAndOffsets.begin(), E = ClassNamesAndOffsets.end();
+ I != E; ++I)
+ Out << " " << I->first << " | " << I->second << '\n';
+
+ Out << "\n";
+ }
+
+ if (!Thunks.empty()) {
+ // We store the method names in a map to get a stable order.
+ std::map<std::string, const CXXMethodDecl *> MethodNamesAndDecls;
+
+ for (ThunksMapTy::const_iterator I = Thunks.begin(), E = Thunks.end();
+ I != E; ++I) {
+ const CXXMethodDecl *MD = I->first;
+ std::string MethodName =
+ PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
+ MD);
+
+ MethodNamesAndDecls.insert(std::make_pair(MethodName, MD));
+ }
+
+ for (std::map<std::string, const CXXMethodDecl *>::const_iterator I =
+ MethodNamesAndDecls.begin(), E = MethodNamesAndDecls.end();
+ I != E; ++I) {
+ const std::string &MethodName = I->first;
+ const CXXMethodDecl *MD = I->second;
+
+ ThunkInfoVectorTy ThunksVector = Thunks[MD];
+ std::sort(ThunksVector.begin(), ThunksVector.end());
+
+ Out << "Thunks for '" << MethodName << "' (" << ThunksVector.size();
+ Out << (ThunksVector.size() == 1 ? " entry" : " entries") << ").\n";
+
+ for (unsigned I = 0, E = ThunksVector.size(); I != E; ++I) {
+ const ThunkInfo &Thunk = ThunksVector[I];
+
+ Out << llvm::format("%4d | ", I);
+
+ // If this function pointer has a return pointer adjustment, dump it.
+ if (!Thunk.Return.isEmpty()) {
+          Out << "return adjustment: " << Thunk.Return.NonVirtual;
+ Out << " non-virtual";
+ if (Thunk.Return.VBaseOffsetOffset) {
+ Out << ", " << Thunk.Return.VBaseOffsetOffset;
+ Out << " vbase offset offset";
+ }
+
+ if (!Thunk.This.isEmpty())
+ Out << "\n ";
+ }
+
+ // If this function pointer has a 'this' pointer adjustment, dump it.
+ if (!Thunk.This.isEmpty()) {
+ Out << "this adjustment: ";
+ Out << Thunk.This.NonVirtual << " non-virtual";
+
+ if (Thunk.This.VCallOffsetOffset) {
+ Out << ", " << Thunk.This.VCallOffsetOffset;
+ Out << " vcall offset offset";
+ }
+ }
+
+ Out << '\n';
+ }
+
+ Out << '\n';
+
+ }
+ }
+}
+
+}
+
+void CodeGenVTables::ComputeMethodVTableIndices(const CXXRecordDecl *RD) {
+
+ // Itanium C++ ABI 2.5.2:
+ // The order of the virtual function pointers in a virtual table is the
+ // order of declaration of the corresponding member functions in the class.
+ //
+ // There is an entry for any virtual function declared in a class,
+ // whether it is a new function or overrides a base class function,
+ // unless it overrides a function from the primary base, and conversion
+ // between their return types does not require an adjustment.
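+  //
+  // For example (illustrative only):
+  //   struct A { virtual void f(); virtual void g(); };
+  //   struct B : A { virtual void g(); virtual void h(); };
+  // B::g() shares A::g()'s slot (index 1), so the only new entry in B's
+  // vtable is B::h(), at index 2, right after A's two function pointers.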
+
+ int64_t CurrentIndex = 0;
+
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ if (PrimaryBase) {
+ assert(PrimaryBase->isDefinition() &&
+ "Should have the definition decl of the primary base!");
+
+ // Since the record decl shares its vtable pointer with the primary base
+ // we need to start counting at the end of the primary base's vtable.
+ CurrentIndex = getNumVirtualFunctionPointers(PrimaryBase);
+ }
+
+ // Collect all the primary bases, so we can check whether methods override
+ // a method from the base.
+ VTableBuilder::PrimaryBasesSetVectorTy PrimaryBases;
+ for (ASTRecordLayout::primary_base_info_iterator
+ I = Layout.primary_base_begin(), E = Layout.primary_base_end();
+ I != E; ++I)
+ PrimaryBases.insert((*I).getBase());
+
+ const CXXDestructorDecl *ImplicitVirtualDtor = 0;
+
+ for (CXXRecordDecl::method_iterator i = RD->method_begin(),
+ e = RD->method_end(); i != e; ++i) {
+ const CXXMethodDecl *MD = *i;
+
+ // We only want virtual methods.
+ if (!MD->isVirtual())
+ continue;
+
+ // Check if this method overrides a method in the primary base.
+ if (const CXXMethodDecl *OverriddenMD =
+ FindNearestOverriddenMethod(MD, PrimaryBases)) {
+ // Check if converting from the return type of the method to the
+      // return type of the overridden method requires an adjustment.
+ if (ComputeReturnAdjustmentBaseOffset(CGM.getContext(), MD,
+ OverriddenMD).isEmpty()) {
+        // This method shares its vtable index with the method it overrides
+        // in the primary base class.
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ const CXXDestructorDecl *OverriddenDD =
+ cast<CXXDestructorDecl>(OverriddenMD);
+
+ // Add both the complete and deleting entries.
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Complete)] =
+ getMethodVTableIndex(GlobalDecl(OverriddenDD, Dtor_Complete));
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Deleting)] =
+ getMethodVTableIndex(GlobalDecl(OverriddenDD, Dtor_Deleting));
+ } else {
+ MethodVTableIndices[MD] = getMethodVTableIndex(OverriddenMD);
+ }
+
+ // We don't need to add an entry for this method.
+ continue;
+ }
+ }
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ if (MD->isImplicit()) {
+ assert(!ImplicitVirtualDtor &&
+               "Already saw an implicit virtual dtor!");
+ ImplicitVirtualDtor = DD;
+ continue;
+ }
+
+ // Add the complete dtor.
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Complete)] = CurrentIndex++;
+
+ // Add the deleting dtor.
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Deleting)] = CurrentIndex++;
+ } else {
+ // Add the entry.
+ MethodVTableIndices[MD] = CurrentIndex++;
+ }
+ }
+
+ if (ImplicitVirtualDtor) {
+ // Itanium C++ ABI 2.5.2:
+ // If a class has an implicitly-defined virtual destructor,
+ // its entries come after the declared virtual function pointers.
+
+ // Add the complete dtor.
+ MethodVTableIndices[GlobalDecl(ImplicitVirtualDtor, Dtor_Complete)] =
+ CurrentIndex++;
+
+ // Add the deleting dtor.
+ MethodVTableIndices[GlobalDecl(ImplicitVirtualDtor, Dtor_Deleting)] =
+ CurrentIndex++;
+ }
+
+ NumVirtualFunctionPointers[RD] = CurrentIndex;
+}
+
+uint64_t CodeGenVTables::getNumVirtualFunctionPointers(const CXXRecordDecl *RD) {
+ llvm::DenseMap<const CXXRecordDecl *, uint64_t>::iterator I =
+ NumVirtualFunctionPointers.find(RD);
+ if (I != NumVirtualFunctionPointers.end())
+ return I->second;
+
+ ComputeMethodVTableIndices(RD);
+
+ I = NumVirtualFunctionPointers.find(RD);
+ assert(I != NumVirtualFunctionPointers.end() && "Did not find entry!");
+ return I->second;
+}
+
+uint64_t CodeGenVTables::getMethodVTableIndex(GlobalDecl GD) {
+ MethodVTableIndicesTy::iterator I = MethodVTableIndices.find(GD);
+ if (I != MethodVTableIndices.end())
+ return I->second;
+
+ const CXXRecordDecl *RD = cast<CXXMethodDecl>(GD.getDecl())->getParent();
+
+ ComputeMethodVTableIndices(RD);
+
+ I = MethodVTableIndices.find(GD);
+ assert(I != MethodVTableIndices.end() && "Did not find index!");
+ return I->second;
+}
+
+int64_t CodeGenVTables::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *VBase) {
+ ClassPairTy ClassPair(RD, VBase);
+
+ VirtualBaseClassOffsetOffsetsMapTy::iterator I =
+ VirtualBaseClassOffsetOffsets.find(ClassPair);
+ if (I != VirtualBaseClassOffsetOffsets.end())
+ return I->second;
+
+ VCallAndVBaseOffsetBuilder Builder(RD, RD, /*FinalOverriders=*/0,
+ BaseSubobject(RD, 0),
+ /*BaseIsVirtual=*/false,
+ /*OffsetInLayoutClass=*/0);
+
+ for (VCallAndVBaseOffsetBuilder::VBaseOffsetOffsetsMapTy::const_iterator I =
+ Builder.getVBaseOffsetOffsets().begin(),
+ E = Builder.getVBaseOffsetOffsets().end(); I != E; ++I) {
+ // Insert all types.
+ ClassPairTy ClassPair(RD, I->first);
+
+ VirtualBaseClassOffsetOffsets.insert(std::make_pair(ClassPair, I->second));
+ }
+
+ I = VirtualBaseClassOffsetOffsets.find(ClassPair);
+ assert(I != VirtualBaseClassOffsetOffsets.end() && "Did not find index!");
+
+ return I->second;
+}
+
+uint64_t
+CodeGenVTables::getAddressPoint(BaseSubobject Base, const CXXRecordDecl *RD) {
+ assert(AddressPoints.count(std::make_pair(RD, Base)) &&
+ "Did not find address point!");
+
+ uint64_t AddressPoint = AddressPoints.lookup(std::make_pair(RD, Base));
+ assert(AddressPoint && "Address point must not be zero!");
+
+ return AddressPoint;
+}
+
+llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
+ const ThunkInfo &Thunk) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+ // Compute the mangled name.
+ llvm::SmallString<256> Name;
+ if (const CXXDestructorDecl* DD = dyn_cast<CXXDestructorDecl>(MD))
+ getCXXABI().getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(),
+ Thunk.This, Name);
+ else
+ getCXXABI().getMangleContext().mangleThunk(MD, Thunk, Name);
+
+ const llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD);
+ return GetOrCreateLLVMFunction(Name, Ty, GD);
+}
+
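+/// PerformTypeAdjustment - Adjust the given pointer, first by the fixed
+/// non-virtual byte offset and then, if requested, by a virtual offset that is
+/// loaded from the vtable at VirtualAdjustment bytes past the address point.
+/// The result is cast back to the original pointer type.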
+static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
+ llvm::Value *Ptr,
+ int64_t NonVirtualAdjustment,
+ int64_t VirtualAdjustment) {
+ if (!NonVirtualAdjustment && !VirtualAdjustment)
+ return Ptr;
+
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+
+ llvm::Value *V = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy);
+
+ if (NonVirtualAdjustment) {
+ // Do the non-virtual adjustment.
+ V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment);
+ }
+
+ if (VirtualAdjustment) {
+ const llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+
+ // Do the virtual adjustment.
+ llvm::Value *VTablePtrPtr =
+ CGF.Builder.CreateBitCast(V, Int8PtrTy->getPointerTo());
+
+ llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
+
+ llvm::Value *OffsetPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
+
+ OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
+
+ // Load the adjustment offset from the vtable.
+ llvm::Value *Offset = CGF.Builder.CreateLoad(OffsetPtr);
+
+ // Adjust our pointer.
+ V = CGF.Builder.CreateInBoundsGEP(V, Offset);
+ }
+
+ // Cast back to the original type.
+ return CGF.Builder.CreateBitCast(V, Ptr->getType());
+}
+
+static void setThunkVisibility(CodeGenModule &CGM, const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk, llvm::Function *Fn) {
+ CGM.setGlobalVisibility(Fn, MD);
+
+ if (!CGM.getCodeGenOpts().HiddenWeakVTables)
+ return;
+
+ // If the thunk has weak/linkonce linkage, but the function must be
+ // emitted in every translation unit that references it, then we can
+ // emit its thunks with hidden visibility, since its thunks must be
+ // emitted when the function is.
+
+ // This follows CodeGenModule::setTypeVisibility; see the comments
+ // there for explanation.
+
+ if ((Fn->getLinkage() != llvm::GlobalVariable::LinkOnceODRLinkage &&
+ Fn->getLinkage() != llvm::GlobalVariable::WeakODRLinkage) ||
+ Fn->getVisibility() != llvm::GlobalVariable::DefaultVisibility)
+ return;
+
+ if (MD->hasAttr<VisibilityAttr>())
+ return;
+
+ switch (MD->getTemplateSpecializationKind()) {
+ case TSK_ExplicitInstantiationDefinition:
+ case TSK_ExplicitInstantiationDeclaration:
+ return;
+
+ case TSK_Undeclared:
+ break;
+
+ case TSK_ExplicitSpecialization:
+ case TSK_ImplicitInstantiation:
+ if (!CGM.getCodeGenOpts().HiddenWeakTemplateVTables)
+ return;
+ break;
+ }
+
+ // If there's an explicit definition, and that definition is
+ // out-of-line, then we can't assume that all users will have a
+ // definition to emit.
+ const FunctionDecl *Def = 0;
+ if (MD->hasBody(Def) && Def->isOutOfLine())
+ return;
+
+ Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
+}
+
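+/// GenerateThunk - Emit the body of a thunk: adjust the incoming 'this'
+/// pointer, forward the call and all arguments to the target function, and
+/// apply the return adjustment, if any, to the result.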
+void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
+ const ThunkInfo &Thunk) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ QualType ResultType = FPT->getResultType();
+ QualType ThisType = MD->getThisType(getContext());
+
+ FunctionArgList FunctionArgs;
+
+ // FIXME: It would be nice if more of this code could be shared with
+ // CodeGenFunction::GenerateCode.
+
+ // Create the implicit 'this' parameter declaration.
+ CurGD = GD;
+ CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResultType, FunctionArgs);
+
+ // Add the rest of the parameters.
+ for (FunctionDecl::param_const_iterator I = MD->param_begin(),
+ E = MD->param_end(); I != E; ++I) {
+ ParmVarDecl *Param = *I;
+
+ FunctionArgs.push_back(std::make_pair(Param, Param->getType()));
+ }
+
+ StartFunction(GlobalDecl(), ResultType, Fn, FunctionArgs, SourceLocation());
+
+ CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
+
+ // Adjust the 'this' pointer if necessary.
+ llvm::Value *AdjustedThisPtr =
+ PerformTypeAdjustment(*this, LoadCXXThis(),
+ Thunk.This.NonVirtual,
+ Thunk.This.VCallOffsetOffset);
+
+ CallArgList CallArgs;
+
+ // Add our adjusted 'this' pointer.
+ CallArgs.push_back(std::make_pair(RValue::get(AdjustedThisPtr), ThisType));
+
+ // Add the rest of the parameters.
+ for (FunctionDecl::param_const_iterator I = MD->param_begin(),
+ E = MD->param_end(); I != E; ++I) {
+ ParmVarDecl *Param = *I;
+ QualType ArgType = Param->getType();
+ RValue Arg = EmitDelegateCallArg(Param);
+
+ CallArgs.push_back(std::make_pair(Arg, ArgType));
+ }
+
+ // Get our callee.
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(GD),
+ FPT->isVariadic());
+ llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty);
+
+ const CGFunctionInfo &FnInfo =
+ CGM.getTypes().getFunctionInfo(ResultType, CallArgs,
+ FPT->getExtInfo());
+
+ // Determine whether we have a return value slot to use.
+ ReturnValueSlot Slot;
+ if (!ResultType->isVoidType() &&
+ FnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+ hasAggregateLLVMType(CurFnInfo->getReturnType()))
+ Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
+
+ // Now emit our call.
+ RValue RV = EmitCall(FnInfo, Callee, Slot, CallArgs, MD);
+
+ if (!Thunk.Return.isEmpty()) {
+ // Emit the return adjustment.
+ bool NullCheckValue = !ResultType->isReferenceType();
+
+ llvm::BasicBlock *AdjustNull = 0;
+ llvm::BasicBlock *AdjustNotNull = 0;
+ llvm::BasicBlock *AdjustEnd = 0;
+
+ llvm::Value *ReturnValue = RV.getScalarVal();
+
+ if (NullCheckValue) {
+ AdjustNull = createBasicBlock("adjust.null");
+ AdjustNotNull = createBasicBlock("adjust.notnull");
+ AdjustEnd = createBasicBlock("adjust.end");
+
+ llvm::Value *IsNull = Builder.CreateIsNull(ReturnValue);
+ Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull);
+ EmitBlock(AdjustNotNull);
+ }
+
+ ReturnValue = PerformTypeAdjustment(*this, ReturnValue,
+ Thunk.Return.NonVirtual,
+ Thunk.Return.VBaseOffsetOffset);
+
+ if (NullCheckValue) {
+ Builder.CreateBr(AdjustEnd);
+ EmitBlock(AdjustNull);
+ Builder.CreateBr(AdjustEnd);
+ EmitBlock(AdjustEnd);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(ReturnValue->getType());
+ PHI->reserveOperandSpace(2);
+ PHI->addIncoming(ReturnValue, AdjustNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()),
+ AdjustNull);
+ ReturnValue = PHI;
+ }
+
+ RV = RValue::get(ReturnValue);
+ }
+
+ if (!ResultType->isVoidType() && Slot.isNull())
+    CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);
+
+ FinishFunction();
+
+ // Set the right linkage.
+ CGM.setFunctionLinkage(MD, Fn);
+
+ // Set the right visibility.
+ setThunkVisibility(CGM, MD, Thunk, Fn);
+}
+
+void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk)
+{
+ llvm::Constant *Entry = CGM.GetAddrOfThunk(GD, Thunk);
+
+ // Strip off a bitcast if we got one back.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
+ assert(CE->getOpcode() == llvm::Instruction::BitCast);
+ Entry = CE->getOperand(0);
+ }
+
+ // There's already a declaration with the same name, check if it has the same
+ // type or if we need to replace it.
+ if (cast<llvm::GlobalValue>(Entry)->getType()->getElementType() !=
+ CGM.getTypes().GetFunctionTypeForVTable(GD)) {
+ llvm::GlobalValue *OldThunkFn = cast<llvm::GlobalValue>(Entry);
+
+ // If the types mismatch then we have to rewrite the definition.
+ assert(OldThunkFn->isDeclaration() &&
+ "Shouldn't replace non-declaration");
+
+ // Remove the name from the old thunk function and get a new thunk.
+ OldThunkFn->setName(llvm::StringRef());
+ Entry = CGM.GetAddrOfThunk(GD, Thunk);
+
+ // If needed, replace the old thunk with a bitcast.
+ if (!OldThunkFn->use_empty()) {
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(Entry, OldThunkFn->getType());
+ OldThunkFn->replaceAllUsesWith(NewPtrForOldDecl);
+ }
+
+ // Remove the old thunk.
+ OldThunkFn->eraseFromParent();
+ }
+
+ // Actually generate the thunk body.
+ llvm::Function *ThunkFn = cast<llvm::Function>(Entry);
+ CodeGenFunction(CGM).GenerateThunk(ThunkFn, GD, Thunk);
+}
+
+void CodeGenVTables::EmitThunks(GlobalDecl GD)
+{
+ const CXXMethodDecl *MD =
+ cast<CXXMethodDecl>(GD.getDecl())->getCanonicalDecl();
+
+ // We don't need to generate thunks for the base destructor.
+ if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
+ return;
+
+ const CXXRecordDecl *RD = MD->getParent();
+
+ // Compute VTable related info for this class.
+ ComputeVTableRelatedInformation(RD, false);
+
+ ThunksMapTy::const_iterator I = Thunks.find(MD);
+ if (I == Thunks.end()) {
+ // We did not find a thunk for this method.
+ return;
+ }
+
+ const ThunkInfoVectorTy &ThunkInfoVector = I->second;
+ for (unsigned I = 0, E = ThunkInfoVector.size(); I != E; ++I)
+ EmitThunk(GD, ThunkInfoVector[I]);
+}
+
+void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD,
+ bool RequireVTable) {
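+  // Look up (or create) the layout entry for this class. The bool flag records
+  // whether a vtable definition has already been requested.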
+ VTableLayoutData &Entry = VTableLayoutMap[RD];
+
+ // We may need to generate a definition for this vtable.
+ if (RequireVTable && !Entry.getInt()) {
+ if (!isKeyFunctionInAnotherTU(CGM.getContext(), RD) &&
+ RD->getTemplateSpecializationKind()
+ != TSK_ExplicitInstantiationDeclaration)
+ CGM.DeferredVTables.push_back(RD);
+
+ Entry.setInt(true);
+ }
+
+ // Check if we've computed this information before.
+ if (Entry.getPointer())
+ return;
+
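+  // Lay out the vtable for this class, treating it as the most derived class.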
+ VTableBuilder Builder(*this, RD, 0, /*MostDerivedClassIsVirtual=*/0, RD);
+
+ // Add the VTable layout.
+ uint64_t NumVTableComponents = Builder.getNumVTableComponents();
+ uint64_t *LayoutData = new uint64_t[NumVTableComponents + 1];
+ Entry.setPointer(LayoutData);
+
+ // Store the number of components.
+ LayoutData[0] = NumVTableComponents;
+
+ // Store the components.
+ std::copy(Builder.vtable_components_data_begin(),
+ Builder.vtable_components_data_end(),
+ &LayoutData[1]);
+
+ // Add the known thunks.
+ Thunks.insert(Builder.thunks_begin(), Builder.thunks_end());
+
+ // Add the thunks needed in this vtable.
+ assert(!VTableThunksMap.count(RD) &&
+         "Thunks already exist for this vtable!");
+
+ VTableThunksTy &VTableThunks = VTableThunksMap[RD];
+ VTableThunks.append(Builder.vtable_thunks_begin(),
+ Builder.vtable_thunks_end());
+
+ // Sort them.
+ std::sort(VTableThunks.begin(), VTableThunks.end());
+
+ // Add the address points.
+ for (VTableBuilder::AddressPointsMapTy::const_iterator I =
+ Builder.address_points_begin(), E = Builder.address_points_end();
+ I != E; ++I) {
+
+ uint64_t &AddressPoint = AddressPoints[std::make_pair(RD, I->first)];
+
+ // Check if we already have the address points for this base.
+ assert(!AddressPoint && "Address point already exists for this base!");
+
+ AddressPoint = I->second;
+ }
+
+ // If we don't have the vbase information for this class, insert it.
+ // getVirtualBaseOffsetOffset will compute it separately without computing
+ // the rest of the vtable related information.
+ if (!RD->getNumVBases())
+ return;
+
+ const RecordType *VBaseRT =
+ RD->vbases_begin()->getType()->getAs<RecordType>();
+ const CXXRecordDecl *VBase = cast<CXXRecordDecl>(VBaseRT->getDecl());
+
+ if (VirtualBaseClassOffsetOffsets.count(std::make_pair(RD, VBase)))
+ return;
+
+ for (VTableBuilder::VBaseOffsetOffsetsMapTy::const_iterator I =
+ Builder.getVBaseOffsetOffsets().begin(),
+ E = Builder.getVBaseOffsetOffsets().end(); I != E; ++I) {
+ // Insert all types.
+ ClassPairTy ClassPair(RD, I->first);
+
+ VirtualBaseClassOffsetOffsets.insert(std::make_pair(ClassPair, I->second));
+ }
+}
+
+llvm::Constant *
+CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
+ const uint64_t *Components,
+ unsigned NumComponents,
+ const VTableThunksTy &VTableThunks) {
+ llvm::SmallVector<llvm::Constant *, 64> Inits;
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+
+ const llvm::Type *PtrDiffTy =
+ CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+
+ QualType ClassType = CGM.getContext().getTagDeclType(RD);
+ llvm::Constant *RTTI = CGM.GetAddrOfRTTIDescriptor(ClassType);
+
+ unsigned NextVTableThunkIndex = 0;
+
+ llvm::Constant* PureVirtualFn = 0;
+
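+  // Decode each opaque 64-bit component and build the matching i8* initializer.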
+ for (unsigned I = 0; I != NumComponents; ++I) {
+ VTableComponent Component =
+ VTableComponent::getFromOpaqueInteger(Components[I]);
+
+ llvm::Constant *Init = 0;
+
+ switch (Component.getKind()) {
+ case VTableComponent::CK_VCallOffset:
+ Init = llvm::ConstantInt::get(PtrDiffTy, Component.getVCallOffset());
+ Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
+ break;
+ case VTableComponent::CK_VBaseOffset:
+ Init = llvm::ConstantInt::get(PtrDiffTy, Component.getVBaseOffset());
+ Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
+ break;
+ case VTableComponent::CK_OffsetToTop:
+ Init = llvm::ConstantInt::get(PtrDiffTy, Component.getOffsetToTop());
+ Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
+ break;
+ case VTableComponent::CK_RTTI:
+ Init = llvm::ConstantExpr::getBitCast(RTTI, Int8PtrTy);
+ break;
+ case VTableComponent::CK_FunctionPointer:
+ case VTableComponent::CK_CompleteDtorPointer:
+ case VTableComponent::CK_DeletingDtorPointer: {
+ GlobalDecl GD;
+
+ // Get the right global decl.
+ switch (Component.getKind()) {
+ default:
+ llvm_unreachable("Unexpected vtable component kind");
+ case VTableComponent::CK_FunctionPointer:
+ GD = Component.getFunctionDecl();
+ break;
+ case VTableComponent::CK_CompleteDtorPointer:
+ GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Complete);
+ break;
+ case VTableComponent::CK_DeletingDtorPointer:
+ GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Deleting);
+ break;
+ }
+
+ if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
+ // We have a pure virtual member function.
+ if (!PureVirtualFn) {
+ const llvm::FunctionType *Ty =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
+ /*isVarArg=*/false);
+ PureVirtualFn =
+ CGM.CreateRuntimeFunction(Ty, "__cxa_pure_virtual");
+ PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
+ Int8PtrTy);
+ }
+
+ Init = PureVirtualFn;
+ } else {
+ // Check if we should use a thunk.
+ if (NextVTableThunkIndex < VTableThunks.size() &&
+ VTableThunks[NextVTableThunkIndex].first == I) {
+ const ThunkInfo &Thunk = VTableThunks[NextVTableThunkIndex].second;
+
+ Init = CGM.GetAddrOfThunk(GD, Thunk);
+
+ NextVTableThunkIndex++;
+ } else {
+ const llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(GD);
+
+ Init = CGM.GetAddrOfFunction(GD, Ty);
+ }
+
+ Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
+ }
+ break;
+ }
+
+ case VTableComponent::CK_UnusedFunctionPointer:
+ Init = llvm::ConstantExpr::getNullValue(Int8PtrTy);
+ break;
+    }
+
+ Inits.push_back(Init);
+ }
+
+ llvm::ArrayType *ArrayType = llvm::ArrayType::get(Int8PtrTy, NumComponents);
+ return llvm::ConstantArray::get(ArrayType, Inits.data(), Inits.size());
+}
+
+/// GetGlobalVariable - Will return a global variable of the given type.
+/// If a variable with a different type already exists then a new variable
+/// with the right type will be created.
+/// FIXME: We should move this to CodeGenModule and rename it to something
+/// better and then use it in CGVTT and CGRTTI.
+static llvm::GlobalVariable *
+GetGlobalVariable(llvm::Module &Module, llvm::StringRef Name,
+ const llvm::Type *Ty,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+
+ llvm::GlobalVariable *GV = Module.getNamedGlobal(Name);
+ llvm::GlobalVariable *OldGV = 0;
+
+ if (GV) {
+ // Check if the variable has the right type.
+ if (GV->getType()->getElementType() == Ty)
+ return GV;
+
+ assert(GV->isDeclaration() && "Declaration has wrong type!");
+
+ OldGV = GV;
+ }
+
+ // Create a new variable.
+ GV = new llvm::GlobalVariable(Module, Ty, /*isConstant=*/true,
+ Linkage, 0, Name);
+
+ if (OldGV) {
+ // Replace occurrences of the old variable if needed.
+ GV->takeName(OldGV);
+
+ if (!OldGV->use_empty()) {
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtrForOldDecl);
+ }
+
+ OldGV->eraseFromParent();
+ }
+
+ return GV;
+}
+
+llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
+ llvm::SmallString<256> OutName;
+ CGM.getCXXABI().getMangleContext().mangleCXXVTable(RD, OutName);
+ llvm::StringRef Name = OutName.str();
+
+ ComputeVTableRelatedInformation(RD, true);
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(Int8PtrTy, getNumVTableComponents(RD));
+
+ return GetGlobalVariable(CGM.getModule(), Name, ArrayType,
+ llvm::GlobalValue::ExternalLinkage);
+}
+
+void
+CodeGenVTables::EmitVTableDefinition(llvm::GlobalVariable *VTable,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD) {
+ // Dump the vtable layout if necessary.
+ if (CGM.getLangOptions().DumpVTableLayouts) {
+ VTableBuilder Builder(*this, RD, 0, /*MostDerivedClassIsVirtual=*/0, RD);
+
+ Builder.dumpLayout(llvm::errs());
+ }
+
+ assert(VTableThunksMap.count(RD) &&
+ "No thunk status for this record decl!");
+
+ const VTableThunksTy& Thunks = VTableThunksMap[RD];
+
+ // Create and set the initializer.
+ llvm::Constant *Init =
+ CreateVTableInitializer(RD, getVTableComponentsData(RD),
+ getNumVTableComponents(RD), Thunks);
+ VTable->setInitializer(Init);
+
+ // Set the correct linkage.
+ VTable->setLinkage(Linkage);
+
+ // Set the right visibility.
+ CGM.setTypeVisibility(VTable, RD, /*ForRTTI*/ false);
+}
+
+llvm::GlobalVariable *
+CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
+ const BaseSubobject &Base,
+ bool BaseIsVirtual,
+ VTableAddressPointsMapTy& AddressPoints) {
+ VTableBuilder Builder(*this, Base.getBase(), Base.getBaseOffset(),
+ /*MostDerivedClassIsVirtual=*/BaseIsVirtual, RD);
+
+ // Dump the vtable layout if necessary.
+ if (CGM.getLangOptions().DumpVTableLayouts)
+ Builder.dumpLayout(llvm::errs());
+
+ // Add the address points.
+ AddressPoints.insert(Builder.address_points_begin(),
+ Builder.address_points_end());
+
+ // Get the mangled construction vtable name.
+ llvm::SmallString<256> OutName;
+ CGM.getCXXABI().getMangleContext().
+ mangleCXXCtorVTable(RD, Base.getBaseOffset() / 8, Base.getBase(), OutName);
+ llvm::StringRef Name = OutName.str();
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(Int8PtrTy, Builder.getNumVTableComponents());
+
+ // Create the variable that will hold the construction vtable.
+ llvm::GlobalVariable *VTable =
+ GetGlobalVariable(CGM.getModule(), Name, ArrayType,
+ llvm::GlobalValue::InternalLinkage);
+
+ // Add the thunks.
+ VTableThunksTy VTableThunks;
+ VTableThunks.append(Builder.vtable_thunks_begin(),
+ Builder.vtable_thunks_end());
+
+ // Sort them.
+ std::sort(VTableThunks.begin(), VTableThunks.end());
+
+ // Create and set the initializer.
+ llvm::Constant *Init =
+ CreateVTableInitializer(Base.getBase(),
+ Builder.vtable_components_data_begin(),
+ Builder.getNumVTableComponents(), VTableThunks);
+ VTable->setInitializer(Init);
+
+ return VTable;
+}
+
+void
+CodeGenVTables::GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD) {
+ llvm::GlobalVariable *&VTable = VTables[RD];
+ if (VTable) {
+ assert(VTable->getInitializer() && "VTable doesn't have a definition!");
+ return;
+ }
+
+ VTable = GetAddrOfVTable(RD);
+ EmitVTableDefinition(VTable, Linkage, RD);
+
+ GenerateVTT(Linkage, /*GenerateDefinition=*/true, RD);
+
+ // If this is the magic class __cxxabiv1::__fundamental_type_info,
+ // we will emit the typeinfo for the fundamental types. This is the
+ // same behaviour as GCC.
+ const DeclContext *DC = RD->getDeclContext();
+ if (RD->getIdentifier() &&
+ RD->getIdentifier()->isStr("__fundamental_type_info") &&
+ isa<NamespaceDecl>(DC) &&
+ cast<NamespaceDecl>(DC)->getIdentifier() &&
+ cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
+ DC->getParent()->isTranslationUnit())
+ CGM.EmitFundamentalRTTIDescriptors();
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h
new file mode 100644
index 0000000..abcafd6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h
@@ -0,0 +1,369 @@
+//===--- CGVTables.h - Emit LLVM Code for C++ vtables ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGVTABLE_H
+#define CLANG_CODEGEN_CGVTABLE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/GlobalVariable.h"
+#include "GlobalDecl.h"
+
+namespace clang {
+ class CXXRecordDecl;
+
+namespace CodeGen {
+ class CodeGenModule;
+
+/// ReturnAdjustment - A return adjustment.
+struct ReturnAdjustment {
+ /// NonVirtual - The non-virtual adjustment from the derived object to its
+ /// nearest virtual base.
+ int64_t NonVirtual;
+
+ /// VBaseOffsetOffset - The offset (in bytes), relative to the address point
+ /// of the virtual base class offset.
+ int64_t VBaseOffsetOffset;
+
+ ReturnAdjustment() : NonVirtual(0), VBaseOffsetOffset(0) { }
+
+ bool isEmpty() const { return !NonVirtual && !VBaseOffsetOffset; }
+
+ friend bool operator==(const ReturnAdjustment &LHS,
+ const ReturnAdjustment &RHS) {
+ return LHS.NonVirtual == RHS.NonVirtual &&
+ LHS.VBaseOffsetOffset == RHS.VBaseOffsetOffset;
+ }
+
+ friend bool operator<(const ReturnAdjustment &LHS,
+ const ReturnAdjustment &RHS) {
+ if (LHS.NonVirtual < RHS.NonVirtual)
+ return true;
+
+ return LHS.NonVirtual == RHS.NonVirtual &&
+ LHS.VBaseOffsetOffset < RHS.VBaseOffsetOffset;
+ }
+};
+
+/// ThisAdjustment - A 'this' pointer adjustment.
+struct ThisAdjustment {
+ /// NonVirtual - The non-virtual adjustment from the derived object to its
+ /// nearest virtual base.
+ int64_t NonVirtual;
+
+ /// VCallOffsetOffset - The offset (in bytes), relative to the address point,
+ /// of the virtual call offset.
+ int64_t VCallOffsetOffset;
+
+ ThisAdjustment() : NonVirtual(0), VCallOffsetOffset(0) { }
+
+ bool isEmpty() const { return !NonVirtual && !VCallOffsetOffset; }
+
+ friend bool operator==(const ThisAdjustment &LHS,
+ const ThisAdjustment &RHS) {
+ return LHS.NonVirtual == RHS.NonVirtual &&
+ LHS.VCallOffsetOffset == RHS.VCallOffsetOffset;
+ }
+
+ friend bool operator<(const ThisAdjustment &LHS,
+ const ThisAdjustment &RHS) {
+ if (LHS.NonVirtual < RHS.NonVirtual)
+ return true;
+
+ return LHS.NonVirtual == RHS.NonVirtual &&
+ LHS.VCallOffsetOffset < RHS.VCallOffsetOffset;
+ }
+};
+
+/// ThunkInfo - The 'this' pointer adjustment as well as an optional return
+/// adjustment for a thunk.
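+///
+/// For example (illustrative only): given
+///   struct A { virtual void f(); };
+///   struct B { virtual void g(); };
+///   struct C : A, B { virtual void g(); };
+/// the entry for g() in the B-in-C vtable is a thunk whose ThisAdjustment
+/// subtracts the offset of the B subobject within C before calling C::g().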
+struct ThunkInfo {
+ /// This - The 'this' pointer adjustment.
+ ThisAdjustment This;
+
+ /// Return - The return adjustment.
+ ReturnAdjustment Return;
+
+ ThunkInfo() { }
+
+ ThunkInfo(const ThisAdjustment &This, const ReturnAdjustment &Return)
+ : This(This), Return(Return) { }
+
+ friend bool operator==(const ThunkInfo &LHS, const ThunkInfo &RHS) {
+ return LHS.This == RHS.This && LHS.Return == RHS.Return;
+ }
+
+ friend bool operator<(const ThunkInfo &LHS, const ThunkInfo &RHS) {
+ if (LHS.This < RHS.This)
+ return true;
+
+ return LHS.This == RHS.This && LHS.Return < RHS.Return;
+ }
+
+ bool isEmpty() const { return This.isEmpty() && Return.isEmpty(); }
+};
+
+// BaseSubobject - Uniquely identifies a direct or indirect base class.
+// Stores both the base class decl and the offset from the most derived class to
+// the base class.
+class BaseSubobject {
+ /// Base - The base class declaration.
+ const CXXRecordDecl *Base;
+
+ /// BaseOffset - The offset from the most derived class to the base class.
+ uint64_t BaseOffset;
+
+public:
+ BaseSubobject(const CXXRecordDecl *Base, uint64_t BaseOffset)
+ : Base(Base), BaseOffset(BaseOffset) { }
+
+ /// getBase - Returns the base class declaration.
+ const CXXRecordDecl *getBase() const { return Base; }
+
+ /// getBaseOffset - Returns the base class offset.
+ uint64_t getBaseOffset() const { return BaseOffset; }
+
+ friend bool operator==(const BaseSubobject &LHS, const BaseSubobject &RHS) {
+ return LHS.Base == RHS.Base && LHS.BaseOffset == RHS.BaseOffset;
+ }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+namespace llvm {
+
+template<> struct DenseMapInfo<clang::CodeGen::BaseSubobject> {
+ static clang::CodeGen::BaseSubobject getEmptyKey() {
+ return clang::CodeGen::BaseSubobject(
+ DenseMapInfo<const clang::CXXRecordDecl *>::getEmptyKey(),
+ DenseMapInfo<uint64_t>::getEmptyKey());
+ }
+
+ static clang::CodeGen::BaseSubobject getTombstoneKey() {
+ return clang::CodeGen::BaseSubobject(
+ DenseMapInfo<const clang::CXXRecordDecl *>::getTombstoneKey(),
+ DenseMapInfo<uint64_t>::getTombstoneKey());
+ }
+
+ static unsigned getHashValue(const clang::CodeGen::BaseSubobject &Base) {
+ return
+ DenseMapInfo<const clang::CXXRecordDecl *>::getHashValue(Base.getBase()) ^
+ DenseMapInfo<uint64_t>::getHashValue(Base.getBaseOffset());
+ }
+
+ static bool isEqual(const clang::CodeGen::BaseSubobject &LHS,
+ const clang::CodeGen::BaseSubobject &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// It's OK to treat BaseSubobject as a POD type.
+template <> struct isPodLike<clang::CodeGen::BaseSubobject> {
+ static const bool value = true;
+};
+
+}
+
+namespace clang {
+namespace CodeGen {
+
+class CodeGenVTables {
+ CodeGenModule &CGM;
+
+ /// MethodVTableIndices - Contains the index (relative to the vtable address
+ /// point) where the function pointer for a virtual function is stored.
+ typedef llvm::DenseMap<GlobalDecl, int64_t> MethodVTableIndicesTy;
+ MethodVTableIndicesTy MethodVTableIndices;
+
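+  /// ClassPairTy - A (class, virtual base class) pair, used as the key type
+  /// of VirtualBaseClassOffsetOffsets.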
+ typedef std::pair<const CXXRecordDecl *,
+ const CXXRecordDecl *> ClassPairTy;
+
+ /// VirtualBaseClassOffsetOffsets - Contains the vtable offset (relative to
+ /// the address point) in bytes where the offsets for virtual bases of a class
+ /// are stored.
+ typedef llvm::DenseMap<ClassPairTy, int64_t>
+ VirtualBaseClassOffsetOffsetsMapTy;
+ VirtualBaseClassOffsetOffsetsMapTy VirtualBaseClassOffsetOffsets;
+
+ /// VTables - All the vtables which have been defined.
+ llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
+
+ /// NumVirtualFunctionPointers - Contains the number of virtual function
+ /// pointers in the vtable for a given record decl.
+ llvm::DenseMap<const CXXRecordDecl *, uint64_t> NumVirtualFunctionPointers;
+
+ typedef llvm::SmallVector<ThunkInfo, 1> ThunkInfoVectorTy;
+ typedef llvm::DenseMap<const CXXMethodDecl *, ThunkInfoVectorTy> ThunksMapTy;
+
+ /// Thunks - Contains all thunks that a given method decl will need.
+ ThunksMapTy Thunks;
+
+ // The layout entry and a bool indicating whether we've actually emitted
+ // the vtable.
+ typedef llvm::PointerIntPair<uint64_t *, 1, bool> VTableLayoutData;
+ typedef llvm::DenseMap<const CXXRecordDecl *, VTableLayoutData>
+ VTableLayoutMapTy;
+
+ /// VTableLayoutMap - Stores the vtable layout for all record decls.
+ /// The layout is stored as an array of 64-bit integers, where the first
+ /// integer is the number of vtable entries in the layout, and the subsequent
+ /// integers are the vtable components.
+ VTableLayoutMapTy VTableLayoutMap;
+
+ typedef std::pair<const CXXRecordDecl *, BaseSubobject> BaseSubobjectPairTy;
+ typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t> AddressPointsMapTy;
+
+ /// Address points - Address points for all vtables.
+ AddressPointsMapTy AddressPoints;
+
+ /// VTableAddressPointsMapTy - Address points for a single vtable.
+ typedef llvm::DenseMap<BaseSubobject, uint64_t> VTableAddressPointsMapTy;
+
+ typedef llvm::SmallVector<std::pair<uint64_t, ThunkInfo>, 1>
+ VTableThunksTy;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, VTableThunksTy>
+ VTableThunksMapTy;
+
+ /// VTableThunksMap - Contains thunks needed by vtables.
+ VTableThunksMapTy VTableThunksMap;
+
+ uint64_t getNumVTableComponents(const CXXRecordDecl *RD) const {
+ assert(VTableLayoutMap.count(RD) && "No vtable layout for this class!");
+
+ return VTableLayoutMap.lookup(RD).getPointer()[0];
+ }
+
+ const uint64_t *getVTableComponentsData(const CXXRecordDecl *RD) const {
+ assert(VTableLayoutMap.count(RD) && "No vtable layout for this class!");
+
+ uint64_t *Components = VTableLayoutMap.lookup(RD).getPointer();
+ return &Components[1];
+ }
+
+ typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t> SubVTTIndiciesMapTy;
+
+ /// SubVTTIndicies - Contains indices into the various sub-VTTs.
+ SubVTTIndiciesMapTy SubVTTIndicies;
+
+ typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t>
+ SecondaryVirtualPointerIndicesMapTy;
+
+ /// SecondaryVirtualPointerIndices - Contains the secondary virtual pointer
+ /// indices.
+ SecondaryVirtualPointerIndicesMapTy SecondaryVirtualPointerIndices;
+
+ /// getNumVirtualFunctionPointers - Return the number of virtual function
+ /// pointers in the vtable for a given record decl.
+ uint64_t getNumVirtualFunctionPointers(const CXXRecordDecl *RD);
+
+ void ComputeMethodVTableIndices(const CXXRecordDecl *RD);
+
+ llvm::GlobalVariable *GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
+ bool GenerateDefinition,
+ const CXXRecordDecl *RD);
+
+ /// EmitThunk - Emit a single thunk.
+ void EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk);
+
+ /// ComputeVTableRelatedInformation - Compute and store all vtable related
+ /// information (vtable layout, vbase offset offsets, thunks etc) for the
+ /// given record decl.
+ void ComputeVTableRelatedInformation(const CXXRecordDecl *RD,
+ bool VTableRequired);
+
+ /// CreateVTableInitializer - Create a vtable initializer for the given record
+ /// decl.
+ /// \param Components - The vtable components; this is really an array of
+ /// VTableComponents.
+ llvm::Constant *CreateVTableInitializer(const CXXRecordDecl *RD,
+ const uint64_t *Components,
+ unsigned NumComponents,
+ const VTableThunksTy &VTableThunks);
+
+public:
+ CodeGenVTables(CodeGenModule &CGM)
+ : CGM(CGM) { }
+
+ // isKeyFunctionInAnotherTU - True if this record has a key function and it is
+ // in another translation unit.
+ static bool isKeyFunctionInAnotherTU(ASTContext &Context,
+ const CXXRecordDecl *RD) {
+ assert (RD->isDynamicClass() && "Non dynamic classes have no key.");
+ const CXXMethodDecl *KeyFunction = Context.getKeyFunction(RD);
+ return KeyFunction && !KeyFunction->hasBody();
+ }
+
+ /// needsVTTParameter - Return whether the given global decl needs a VTT
+ /// parameter, which it does if it's a base constructor or destructor with
+ /// virtual bases.
+ static bool needsVTTParameter(GlobalDecl GD);
+
+ /// getSubVTTIndex - Return the index of the sub-VTT for the base class of the
+ /// given record decl.
+ uint64_t getSubVTTIndex(const CXXRecordDecl *RD, BaseSubobject Base);
+
+ /// getSecondaryVirtualPointerIndex - Return the index in the VTT where the
+ /// virtual pointer for the given subobject is located.
+ uint64_t getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD,
+ BaseSubobject Base);
+
+ /// getMethodVTableIndex - Return the index (relative to the vtable address
+ /// point) where the function pointer for the given virtual function is
+ /// stored.
+ uint64_t getMethodVTableIndex(GlobalDecl GD);
+
+ /// getVirtualBaseOffsetOffset - Return the offset in bytes (relative to the
+ /// vtable address point) where the offset of the virtual base that contains
+  /// the given base is stored; if no virtual base contains the given class,
+  /// return 0. VBase must be a virtual base class or an unambiguous base.
+ int64_t getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *VBase);
+
+ /// getAddressPoint - Get the address point of the given subobject in the
+ /// class decl.
+ uint64_t getAddressPoint(BaseSubobject Base, const CXXRecordDecl *RD);
+
+ /// GetAddrOfVTable - Get the address of the vtable for the given record decl.
+ llvm::GlobalVariable *GetAddrOfVTable(const CXXRecordDecl *RD);
+
+ /// EmitVTableDefinition - Emit the definition of the given vtable.
+ void EmitVTableDefinition(llvm::GlobalVariable *VTable,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD);
+
+ /// GenerateConstructionVTable - Generate a construction vtable for the given
+ /// base subobject.
+ llvm::GlobalVariable *
+ GenerateConstructionVTable(const CXXRecordDecl *RD, const BaseSubobject &Base,
+ bool BaseIsVirtual,
+ VTableAddressPointsMapTy& AddressPoints);
+
+ llvm::GlobalVariable *getVTT(const CXXRecordDecl *RD);
+
+ /// EmitThunks - Emit the associated thunks for the given global decl.
+ void EmitThunks(GlobalDecl GD);
+
+ /// GenerateClassData - Generate all the class data required to be generated
+ /// upon definition of a KeyFunction. This includes the vtable, the
+ /// rtti data structure and the VTT.
+ ///
+ /// \param Linkage - The desired linkage of the vtable, the RTTI and the VTT.
+ void GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD);
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h b/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h
new file mode 100644
index 0000000..f57ecd2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h
@@ -0,0 +1,327 @@
+//===-- CGValue.h - LLVM CodeGen wrappers for llvm::Value* ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes implement wrappers around llvm::Value in order to
+// fully represent the range of values for C L- and R- values.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGVALUE_H
+#define CLANG_CODEGEN_CGVALUE_H
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Type.h"
+
+namespace llvm {
+ class Constant;
+ class Value;
+}
+
+namespace clang {
+ class ObjCPropertyRefExpr;
+ class ObjCImplicitSetterGetterRefExpr;
+
+namespace CodeGen {
+ class CGBitFieldInfo;
+
+/// RValue - This trivial value class is used to represent the result of an
+/// expression that is evaluated. It can be one of three things: either a
+/// simple LLVM SSA value, a pair of SSA values for complex numbers, or the
+/// address of an aggregate value in memory.
+class RValue {
+ enum Flavor { Scalar, Complex, Aggregate };
+
+ // Stores first value and flavor.
+ llvm::PointerIntPair<llvm::Value *, 2, Flavor> V1;
+ // Stores second value and volatility.
+ llvm::PointerIntPair<llvm::Value *, 1, bool> V2;
+
+public:
+ bool isScalar() const { return V1.getInt() == Scalar; }
+ bool isComplex() const { return V1.getInt() == Complex; }
+ bool isAggregate() const { return V1.getInt() == Aggregate; }
+
+ bool isVolatileQualified() const { return V2.getInt(); }
+
+ /// getScalarVal() - Return the Value* of this scalar value.
+ llvm::Value *getScalarVal() const {
+ assert(isScalar() && "Not a scalar!");
+ return V1.getPointer();
+ }
+
+ /// getComplexVal - Return the real/imag components of this complex value.
+ ///
+ std::pair<llvm::Value *, llvm::Value *> getComplexVal() const {
+ return std::make_pair(V1.getPointer(), V2.getPointer());
+ }
+
+ /// getAggregateAddr() - Return the Value* of the address of the aggregate.
+ llvm::Value *getAggregateAddr() const {
+ assert(isAggregate() && "Not an aggregate!");
+ return V1.getPointer();
+ }
+
+ static RValue get(llvm::Value *V) {
+ RValue ER;
+ ER.V1.setPointer(V);
+ ER.V1.setInt(Scalar);
+ ER.V2.setInt(false);
+ return ER;
+ }
+ static RValue getComplex(llvm::Value *V1, llvm::Value *V2) {
+ RValue ER;
+ ER.V1.setPointer(V1);
+ ER.V2.setPointer(V2);
+ ER.V1.setInt(Complex);
+ ER.V2.setInt(false);
+ return ER;
+ }
+ static RValue getComplex(const std::pair<llvm::Value *, llvm::Value *> &C) {
+ return getComplex(C.first, C.second);
+ }
+ // FIXME: Aggregate rvalues need to retain information about whether they are
+ // volatile or not. Remove default to find all places that probably get this
+ // wrong.
+ static RValue getAggregate(llvm::Value *V, bool Volatile = false) {
+ RValue ER;
+ ER.V1.setPointer(V);
+ ER.V1.setInt(Aggregate);
+ ER.V2.setInt(Volatile);
+ return ER;
+ }
+};
+
+
+/// LValue - This represents an lvalue reference. Because C/C++ allow
+/// bitfields, this is not a simple LLVM pointer; it may be a pointer plus a
+/// bitrange.
+class LValue {
+ // FIXME: alignment?
+
+ enum {
+ Simple, // This is a normal l-value, use getAddress().
+ VectorElt, // This is a vector element l-value (V[i]), use getVector*
+ BitField, // This is a bitfield l-value, use getBitfield*.
+ ExtVectorElt, // This is an extended vector subset, use getExtVectorComp
+ PropertyRef, // This is an Objective-C property reference, use
+ // getPropertyRefExpr
+ KVCRef // This is an objective-c 'implicit' property ref,
+ // use getKVCRefExpr
+ } LVType;
+
+ llvm::Value *V;
+
+ union {
+ // Index into a vector subscript: V[i]
+ llvm::Value *VectorIdx;
+
+ // ExtVector element subset: V.xyx
+ llvm::Constant *VectorElts;
+
+ // BitField start bit and size
+ const CGBitFieldInfo *BitFieldInfo;
+
+ // Obj-C property reference expression
+ const ObjCPropertyRefExpr *PropertyRefExpr;
+
+ // ObjC 'implicit' property reference expression
+ const ObjCImplicitSetterGetterRefExpr *KVCRefExpr;
+ };
+
+ // 'const' is unused here
+ Qualifiers Quals;
+
+ /// The alignment to use when accessing this lvalue.
+ unsigned short Alignment;
+
+ // objective-c's ivar
+ bool Ivar:1;
+
+ // objective-c's ivar is an array
+ bool ObjIsArray:1;
+
+ // LValue is non-gc'able for any reason, including being a parameter or local
+ // variable.
+ bool NonGC: 1;
+
+ // Lvalue is a global reference of an objective-c object
+ bool GlobalObjCRef : 1;
+
+ // Lvalue is a thread local reference
+ bool ThreadLocalRef : 1;
+
+ Expr *BaseIvarExp;
+private:
+ void Initialize(Qualifiers Quals, unsigned Alignment = 0) {
+ this->Quals = Quals;
+ this->Alignment = Alignment;
+ assert(this->Alignment == Alignment && "Alignment exceeds allowed max!");
+
+ // Initialize Objective-C flags.
+ this->Ivar = this->ObjIsArray = this->NonGC = this->GlobalObjCRef = false;
+ this->ThreadLocalRef = false;
+ this->BaseIvarExp = 0;
+ }
+
+public:
+ bool isSimple() const { return LVType == Simple; }
+ bool isVectorElt() const { return LVType == VectorElt; }
+ bool isBitField() const { return LVType == BitField; }
+ bool isExtVectorElt() const { return LVType == ExtVectorElt; }
+ bool isPropertyRef() const { return LVType == PropertyRef; }
+ bool isKVCRef() const { return LVType == KVCRef; }
+
+ bool isVolatileQualified() const { return Quals.hasVolatile(); }
+ bool isRestrictQualified() const { return Quals.hasRestrict(); }
+ unsigned getVRQualifiers() const {
+ return Quals.getCVRQualifiers() & ~Qualifiers::Const;
+ }
+
+ bool isObjCIvar() const { return Ivar; }
+ void setObjCIvar(bool Value) { Ivar = Value; }
+
+ bool isObjCArray() const { return ObjIsArray; }
+ void setObjCArray(bool Value) { ObjIsArray = Value; }
+
+ bool isNonGC () const { return NonGC; }
+ void setNonGC(bool Value) { NonGC = Value; }
+
+ bool isGlobalObjCRef() const { return GlobalObjCRef; }
+ void setGlobalObjCRef(bool Value) { GlobalObjCRef = Value; }
+
+ bool isThreadLocalRef() const { return ThreadLocalRef; }
+ void setThreadLocalRef(bool Value) { ThreadLocalRef = Value;}
+
+ bool isObjCWeak() const {
+ return Quals.getObjCGCAttr() == Qualifiers::Weak;
+ }
+ bool isObjCStrong() const {
+ return Quals.getObjCGCAttr() == Qualifiers::Strong;
+ }
+
+ Expr *getBaseIvarExp() const { return BaseIvarExp; }
+ void setBaseIvarExp(Expr *V) { BaseIvarExp = V; }
+
+ const Qualifiers &getQuals() const { return Quals; }
+ Qualifiers &getQuals() { return Quals; }
+
+ unsigned getAddressSpace() const { return Quals.getAddressSpace(); }
+
+ unsigned getAlignment() const { return Alignment; }
+
+ // simple lvalue
+ llvm::Value *getAddress() const { assert(isSimple()); return V; }
+
+ // vector elt lvalue
+ llvm::Value *getVectorAddr() const { assert(isVectorElt()); return V; }
+ llvm::Value *getVectorIdx() const { assert(isVectorElt()); return VectorIdx; }
+
+ // extended vector elements.
+ llvm::Value *getExtVectorAddr() const { assert(isExtVectorElt()); return V; }
+ llvm::Constant *getExtVectorElts() const {
+ assert(isExtVectorElt());
+ return VectorElts;
+ }
+
+ // bitfield lvalue
+ llvm::Value *getBitFieldBaseAddr() const {
+ assert(isBitField());
+ return V;
+ }
+ const CGBitFieldInfo &getBitFieldInfo() const {
+ assert(isBitField());
+ return *BitFieldInfo;
+ }
+
+ // property ref lvalue
+ const ObjCPropertyRefExpr *getPropertyRefExpr() const {
+ assert(isPropertyRef());
+ return PropertyRefExpr;
+ }
+
+ // 'implicit' property ref lvalue
+ const ObjCImplicitSetterGetterRefExpr *getKVCRefExpr() const {
+ assert(isKVCRef());
+ return KVCRefExpr;
+ }
+
+ static LValue MakeAddr(llvm::Value *V, QualType T, unsigned Alignment,
+ ASTContext &Context) {
+ Qualifiers Quals = Context.getCanonicalType(T).getQualifiers();
+ Quals.setObjCGCAttr(Context.getObjCGCAttrKind(T));
+
+ LValue R;
+ R.LVType = Simple;
+ R.V = V;
+ R.Initialize(Quals, Alignment);
+ return R;
+ }
+
+ static LValue MakeVectorElt(llvm::Value *Vec, llvm::Value *Idx,
+ unsigned CVR) {
+ LValue R;
+ R.LVType = VectorElt;
+ R.V = Vec;
+ R.VectorIdx = Idx;
+ R.Initialize(Qualifiers::fromCVRMask(CVR));
+ return R;
+ }
+
+ static LValue MakeExtVectorElt(llvm::Value *Vec, llvm::Constant *Elts,
+ unsigned CVR) {
+ LValue R;
+ R.LVType = ExtVectorElt;
+ R.V = Vec;
+ R.VectorElts = Elts;
+ R.Initialize(Qualifiers::fromCVRMask(CVR));
+ return R;
+ }
+
+ /// \brief Create a new object to represent a bit-field access.
+ ///
+ /// \param BaseValue - The base address of the structure containing the
+ /// bit-field.
+ /// \param Info - The information describing how to perform the bit-field
+ /// access.
+ static LValue MakeBitfield(llvm::Value *BaseValue, const CGBitFieldInfo &Info,
+ unsigned CVR) {
+ LValue R;
+ R.LVType = BitField;
+ R.V = BaseValue;
+ R.BitFieldInfo = &Info;
+ R.Initialize(Qualifiers::fromCVRMask(CVR));
+ return R;
+ }
+
+ // FIXME: It is probably bad that we aren't emitting the target when we build
+ // the lvalue. However, this complicates the code a bit, and I haven't figured
+ // out how to make it go wrong yet.
+ static LValue MakePropertyRef(const ObjCPropertyRefExpr *E,
+ unsigned CVR) {
+ LValue R;
+ R.LVType = PropertyRef;
+ R.PropertyRefExpr = E;
+ R.Initialize(Qualifiers::fromCVRMask(CVR));
+ return R;
+ }
+
+ static LValue MakeKVCRef(const ObjCImplicitSetterGetterRefExpr *E,
+ unsigned CVR) {
+ LValue R;
+ R.LVType = KVCRef;
+ R.KVCRefExpr = E;
+ R.Initialize(Qualifiers::fromCVRMask(CVR));
+ return R;
+ }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt b/contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt
new file mode 100644
index 0000000..b5a2329
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt
@@ -0,0 +1,41 @@
+set(LLVM_NO_RTTI 1)
+
+add_clang_library(clangCodeGen
+ BackendUtil.cpp
+ CGBlocks.cpp
+ CGBuiltin.cpp
+ CGCall.cpp
+ CGClass.cpp
+ CGCXX.cpp
+ CGDebugInfo.cpp
+ CGDecl.cpp
+ CGDeclCXX.cpp
+ CGException.cpp
+ CGExpr.cpp
+ CGExprAgg.cpp
+ CGExprComplex.cpp
+ CGExprConstant.cpp
+ CGExprCXX.cpp
+ CGExprScalar.cpp
+ CGObjC.cpp
+ CGObjCGNU.cpp
+ CGObjCMac.cpp
+ CGRecordLayoutBuilder.cpp
+ CGRTTI.cpp
+ CGStmt.cpp
+ CGTemporaries.cpp
+ CGVTables.cpp
+ CGVTT.cpp
+ CodeGenAction.cpp
+ CodeGenFunction.cpp
+ CodeGenModule.cpp
+ CodeGenTypes.cpp
+ ItaniumCXXABI.cpp
+ Mangle.cpp
+ MicrosoftCXXABI.cpp
+ ModuleBuilder.cpp
+ TargetInfo.cpp
+ )
+
+add_dependencies(clangCodeGen ClangAttrClasses ClangAttrList ClangDeclNodes
+ ClangStmtNodes)
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp
new file mode 100644
index 0000000..51c55a1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp
@@ -0,0 +1,348 @@
+//===--- CodeGenAction.cpp - LLVM Code Generation Frontend Action ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/CodeGenAction.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/CodeGen/BackendUtil.h"
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/IRReader.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/Timer.h"
+using namespace clang;
+using namespace llvm;
+
+namespace {
+ class BackendConsumer : public ASTConsumer {
+ Diagnostic &Diags;
+ BackendAction Action;
+ const CodeGenOptions &CodeGenOpts;
+ const TargetOptions &TargetOpts;
+ llvm::raw_ostream *AsmOutStream;
+ ASTContext *Context;
+
+ Timer LLVMIRGeneration;
+
+ llvm::OwningPtr<CodeGenerator> Gen;
+
+ llvm::OwningPtr<llvm::Module> TheModule;
+
+ public:
+ BackendConsumer(BackendAction action, Diagnostic &_Diags,
+ const CodeGenOptions &compopts,
+ const TargetOptions &targetopts, bool TimePasses,
+ const std::string &infile, llvm::raw_ostream *OS,
+ LLVMContext &C) :
+ Diags(_Diags),
+ Action(action),
+ CodeGenOpts(compopts),
+ TargetOpts(targetopts),
+ AsmOutStream(OS),
+ LLVMIRGeneration("LLVM IR Generation Time"),
+ Gen(CreateLLVMCodeGen(Diags, infile, compopts, C)) {
+ llvm::TimePassesIsEnabled = TimePasses;
+ }
+
+ llvm::Module *takeModule() { return TheModule.take(); }
+
+ virtual void Initialize(ASTContext &Ctx) {
+ Context = &Ctx;
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.startTimer();
+
+ Gen->Initialize(Ctx);
+
+ TheModule.reset(Gen->GetModule());
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.stopTimer();
+ }
+
+ virtual void HandleTopLevelDecl(DeclGroupRef D) {
+ PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(),
+ Context->getSourceManager(),
+ "LLVM IR generation of declaration");
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.startTimer();
+
+ Gen->HandleTopLevelDecl(D);
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.stopTimer();
+ }
+
+ virtual void HandleTranslationUnit(ASTContext &C) {
+ {
+ PrettyStackTraceString CrashInfo("Per-file LLVM IR generation");
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.startTimer();
+
+ Gen->HandleTranslationUnit(C);
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.stopTimer();
+ }
+
+ // Silently ignore if we weren't initialized for some reason.
+ if (!TheModule)
+ return;
+
+ // Make sure IR generation is happy with the module. This is released by
+ // the module provider.
+ Module *M = Gen->ReleaseModule();
+ if (!M) {
+ // The module has been released by IR gen on failures, do not double
+ // free.
+ TheModule.take();
+ return;
+ }
+
+ assert(TheModule.get() == M &&
+ "Unexpected module change during IR generation");
+
+ // Install an inline asm handler so that diagnostics get printed through
+ // our diagnostics hooks.
+ LLVMContext &Ctx = TheModule->getContext();
+ void *OldHandler = Ctx.getInlineAsmDiagnosticHandler();
+ void *OldContext = Ctx.getInlineAsmDiagnosticContext();
+ Ctx.setInlineAsmDiagnosticHandler((void*)(intptr_t)InlineAsmDiagHandler,
+ this);
+
+ EmitBackendOutput(Diags, CodeGenOpts, TargetOpts,
+ TheModule.get(), Action, AsmOutStream);
+
+ Ctx.setInlineAsmDiagnosticHandler(OldHandler, OldContext);
+ }
+
+ virtual void HandleTagDeclDefinition(TagDecl *D) {
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ Context->getSourceManager(),
+ "LLVM IR generation of declaration");
+ Gen->HandleTagDeclDefinition(D);
+ }
+
+ virtual void CompleteTentativeDefinition(VarDecl *D) {
+ Gen->CompleteTentativeDefinition(D);
+ }
+
+ virtual void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) {
+ Gen->HandleVTable(RD, DefinitionRequired);
+ }
+
+ static void InlineAsmDiagHandler(const llvm::SMDiagnostic &SM,void *Context,
+ unsigned LocCookie) {
+ SourceLocation Loc = SourceLocation::getFromRawEncoding(LocCookie);
+ ((BackendConsumer*)Context)->InlineAsmDiagHandler2(SM, Loc);
+ }
+
+ void InlineAsmDiagHandler2(const llvm::SMDiagnostic &,
+ SourceLocation LocCookie);
+ };
+}
+
+/// ConvertBackendLocation - Convert a location in a temporary llvm::SourceMgr
+/// buffer to be a valid FullSourceLoc.
+static FullSourceLoc ConvertBackendLocation(const llvm::SMDiagnostic &D,
+ SourceManager &CSM) {
+ // Get both the clang and llvm source managers. The location is relative to
+  // a memory buffer that the LLVM Source Manager is handling, so we need to add
+ // a copy to the Clang source manager.
+ const llvm::SourceMgr &LSM = *D.getSourceMgr();
+
+ // We need to copy the underlying LLVM memory buffer because llvm::SourceMgr
+  // already owns its copy and clang::SourceManager wants to own one as well.
+ const MemoryBuffer *LBuf =
+ LSM.getMemoryBuffer(LSM.FindBufferContainingLoc(D.getLoc()));
+
+ // Create the copy and transfer ownership to clang::SourceManager.
+ llvm::MemoryBuffer *CBuf =
+ llvm::MemoryBuffer::getMemBufferCopy(LBuf->getBuffer(),
+ LBuf->getBufferIdentifier());
+ FileID FID = CSM.createFileIDForMemBuffer(CBuf);
+
+ // Translate the offset into the file.
+ unsigned Offset = D.getLoc().getPointer() - LBuf->getBufferStart();
+ SourceLocation NewLoc =
+ CSM.getLocForStartOfFile(FID).getFileLocWithOffset(Offset);
+ return FullSourceLoc(NewLoc, CSM);
+}
+
+
+/// InlineAsmDiagHandler2 - This function is invoked when the backend hits an
+/// error parsing inline asm. The SMDiagnostic indicates the error relative to
+/// the temporary memory buffer that the inline asm parser has set up.
+void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D,
+ SourceLocation LocCookie) {
+ // There are a couple of different kinds of errors we could get here. First,
+ // we re-format the SMDiagnostic in terms of a clang diagnostic.
+
+ // Strip "error: " off the start of the message string.
+ llvm::StringRef Message = D.getMessage();
+ if (Message.startswith("error: "))
+ Message = Message.substr(7);
+
+ // If the SMDiagnostic has an inline asm source location, translate it.
+ FullSourceLoc Loc;
+ if (D.getLoc() != SMLoc())
+ Loc = ConvertBackendLocation(D, Context->getSourceManager());
+
+
+ // If this problem has clang-level source location information, report the
+ // issue as being an error in the source with a note showing the instantiated
+ // code.
+ if (LocCookie.isValid()) {
+ Diags.Report(FullSourceLoc(LocCookie, Context->getSourceManager()),
+ diag::err_fe_inline_asm).AddString(Message);
+
+ if (D.getLoc().isValid())
+ Diags.Report(Loc, diag::note_fe_inline_asm_here);
+ return;
+ }
+
+ // Otherwise, report the backend error as occurring in the generated .s file.
+ // If Loc is invalid, we still need to report the error, it just gets no
+ // location info.
+ Diags.Report(Loc, diag::err_fe_inline_asm).AddString(Message);
+}
+
+//
+
+CodeGenAction::CodeGenAction(unsigned _Act) : Act(_Act) {}
+
+CodeGenAction::~CodeGenAction() {}
+
+bool CodeGenAction::hasIRSupport() const { return true; }
+
+void CodeGenAction::EndSourceFileAction() {
+ // If the consumer creation failed, do nothing.
+ if (!getCompilerInstance().hasASTConsumer())
+ return;
+
+ // Steal the module from the consumer.
+ BackendConsumer *Consumer = static_cast<BackendConsumer*>(
+ &getCompilerInstance().getASTConsumer());
+
+ TheModule.reset(Consumer->takeModule());
+}
+
+llvm::Module *CodeGenAction::takeModule() {
+ return TheModule.take();
+}
+
+static raw_ostream *GetOutputStream(CompilerInstance &CI,
+ llvm::StringRef InFile,
+ BackendAction Action) {
+ switch (Action) {
+ case Backend_EmitAssembly:
+ return CI.createDefaultOutputFile(false, InFile, "s");
+ case Backend_EmitLL:
+ return CI.createDefaultOutputFile(false, InFile, "ll");
+ case Backend_EmitBC:
+ return CI.createDefaultOutputFile(true, InFile, "bc");
+ case Backend_EmitNothing:
+ return 0;
+ case Backend_EmitMCNull:
+ case Backend_EmitObj:
+ return CI.createDefaultOutputFile(true, InFile, "o");
+ }
+
+ assert(0 && "Invalid action!");
+ return 0;
+}
+
+ASTConsumer *CodeGenAction::CreateASTConsumer(CompilerInstance &CI,
+ llvm::StringRef InFile) {
+ BackendAction BA = static_cast<BackendAction>(Act);
+ llvm::OwningPtr<llvm::raw_ostream> OS(GetOutputStream(CI, InFile, BA));
+ if (BA != Backend_EmitNothing && !OS)
+ return 0;
+
+ return new BackendConsumer(BA, CI.getDiagnostics(),
+ CI.getCodeGenOpts(), CI.getTargetOpts(),
+ CI.getFrontendOpts().ShowTimers, InFile, OS.take(),
+ CI.getLLVMContext());
+}
+
+void CodeGenAction::ExecuteAction() {
+ // If this is an IR file, we have to treat it specially.
+ if (getCurrentFileKind() == IK_LLVM_IR) {
+ BackendAction BA = static_cast<BackendAction>(Act);
+ CompilerInstance &CI = getCompilerInstance();
+ raw_ostream *OS = GetOutputStream(CI, getCurrentFile(), BA);
+ if (BA != Backend_EmitNothing && !OS)
+ return;
+
+ bool Invalid;
+ SourceManager &SM = CI.getSourceManager();
+ const llvm::MemoryBuffer *MainFile = SM.getBuffer(SM.getMainFileID(),
+ &Invalid);
+ if (Invalid)
+ return;
+
+ // FIXME: This is stupid, IRReader shouldn't take ownership.
+ llvm::MemoryBuffer *MainFileCopy =
+ llvm::MemoryBuffer::getMemBufferCopy(MainFile->getBuffer(),
+ getCurrentFile().c_str());
+
+ llvm::SMDiagnostic Err;
+ TheModule.reset(ParseIR(MainFileCopy, Err, CI.getLLVMContext()));
+ if (!TheModule) {
+ // Translate from the diagnostic info to the SourceManager location.
+ SourceLocation Loc = SM.getLocation(
+ SM.getFileEntryForID(SM.getMainFileID()), Err.getLineNo(),
+ Err.getColumnNo() + 1);
+
+ // Get a custom diagnostic for the error. We strip off a leading
+ // diagnostic code if there is one.
+ llvm::StringRef Msg = Err.getMessage();
+ if (Msg.startswith("error: "))
+ Msg = Msg.substr(7);
+ unsigned DiagID = CI.getDiagnostics().getCustomDiagID(Diagnostic::Error,
+ Msg);
+
+ CI.getDiagnostics().Report(FullSourceLoc(Loc, SM), DiagID);
+ return;
+ }
+
+ EmitBackendOutput(CI.getDiagnostics(), CI.getCodeGenOpts(),
+ CI.getTargetOpts(), TheModule.get(),
+ BA, OS);
+ return;
+ }
+
+ // Otherwise follow the normal AST path.
+ this->ASTFrontendAction::ExecuteAction();
+}
+
+//
+
+EmitAssemblyAction::EmitAssemblyAction()
+ : CodeGenAction(Backend_EmitAssembly) {}
+
+EmitBCAction::EmitBCAction() : CodeGenAction(Backend_EmitBC) {}
+
+EmitLLVMAction::EmitLLVMAction() : CodeGenAction(Backend_EmitLL) {}
+
+EmitLLVMOnlyAction::EmitLLVMOnlyAction() : CodeGenAction(Backend_EmitNothing) {}
+
+EmitCodeGenOnlyAction::EmitCodeGenOnlyAction() : CodeGenAction(Backend_EmitMCNull) {}
+
+EmitObjAction::EmitObjAction() : CodeGenAction(Backend_EmitObj) {}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
new file mode 100644
index 0000000..51d084e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -0,0 +1,1341 @@
+//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the per-function state used while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGCXXABI.h"
+#include "CGDebugInfo.h"
+#include "CGException.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Intrinsics.h"
+using namespace clang;
+using namespace CodeGen;
+
+CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
+ : BlockFunction(cgm, *this, Builder), CGM(cgm),
+ Target(CGM.getContext().Target),
+ Builder(cgm.getModule().getContext()),
+ NormalCleanupDest(0), EHCleanupDest(0), NextCleanupDestIndex(1),
+ ExceptionSlot(0), DebugInfo(0), IndirectBranch(0),
+ SwitchInsn(0), CaseRangeBlock(0),
+ DidCallStackSave(false), UnreachableBlock(0),
+ CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
+ ConditionalBranchLevel(0), TerminateLandingPad(0), TerminateHandler(0),
+ TrapBB(0) {
+
+ // Get some frequently used types.
+ LLVMPointerWidth = Target.getPointerWidth(0);
+ llvm::LLVMContext &LLVMContext = CGM.getLLVMContext();
+ IntPtrTy = llvm::IntegerType::get(LLVMContext, LLVMPointerWidth);
+ Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
+ Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
+
+ Exceptions = getContext().getLangOptions().Exceptions;
+ CatchUndefined = getContext().getLangOptions().CatchUndefined;
+ CGM.getCXXABI().getMangleContext().startNewFunction();
+}
+
+ASTContext &CodeGenFunction::getContext() const {
+ return CGM.getContext();
+}
+
+
+const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
+ return CGM.getTypes().ConvertTypeForMem(T);
+}
+
+const llvm::Type *CodeGenFunction::ConvertType(QualType T) {
+ return CGM.getTypes().ConvertType(T);
+}
+
+bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
+ return T->isRecordType() || T->isArrayType() || T->isAnyComplexType() ||
+ T->isObjCObjectType();
+}
+
+void CodeGenFunction::EmitReturnBlock() {
+ // For cleanliness, we try to avoid emitting the return block for
+ // simple cases.
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ if (CurBB) {
+ assert(!CurBB->getTerminator() && "Unexpected terminated block.");
+
+ // We have a valid insert point, reuse it if it is empty or there are no
+ // explicit jumps to the return block.
+ if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
+ ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
+ delete ReturnBlock.getBlock();
+ } else
+ EmitBlock(ReturnBlock.getBlock());
+ return;
+ }
+
+ // Otherwise, if the return block is the target of a single direct
+ // branch then we can just put the code in that block instead. This
+ // cleans up functions which started with a unified return block.
+ if (ReturnBlock.getBlock()->hasOneUse()) {
+ llvm::BranchInst *BI =
+ dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->use_begin());
+ if (BI && BI->isUnconditional() &&
+ BI->getSuccessor(0) == ReturnBlock.getBlock()) {
+ // Reset insertion point and delete the branch.
+ Builder.SetInsertPoint(BI->getParent());
+ BI->eraseFromParent();
+ delete ReturnBlock.getBlock();
+ return;
+ }
+ }
+
+ // FIXME: We are at an unreachable point; there is no reason to emit the block
+ // unless it has uses. However, we still need a place to put the debug
+ // region.end for now.
+
+ EmitBlock(ReturnBlock.getBlock());
+}
+
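+/// EmitIfUsed - Append the given block to the current function if anything
+/// references it; otherwise delete it. A null block is ignored.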
+static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
+ if (!BB) return;
+ if (!BB->use_empty())
+ return CGF.CurFn->getBasicBlockList().push_back(BB);
+ delete BB;
+}
+
+void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
+ assert(BreakContinueStack.empty() &&
+ "mismatched push/pop in break/continue stack!");
+
+ // Emit function epilog (to return).
+ EmitReturnBlock();
+
+ EmitFunctionInstrumentation("__cyg_profile_func_exit");
+
+ // Emit debug descriptor for function end.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(EndLoc);
+ DI->EmitFunctionEnd(Builder);
+ }
+
+ EmitFunctionEpilog(*CurFnInfo);
+ EmitEndEHSpec(CurCodeDecl);
+
+ assert(EHStack.empty() &&
+ "did not remove all scopes from cleanup stack!");
+
+ // If someone did an indirect goto, emit the indirect goto block at the end of
+ // the function.
+ if (IndirectBranch) {
+ EmitBlock(IndirectBranch->getParent());
+ Builder.ClearInsertionPoint();
+ }
+
+ // Remove the AllocaInsertPt instruction, which is just a convenience for us.
+ llvm::Instruction *Ptr = AllocaInsertPt;
+ AllocaInsertPt = 0;
+ Ptr->eraseFromParent();
+
+ // If someone took the address of a label but never did an indirect goto, we
+ // made a zero entry PHI node, which is illegal, zap it now.
+ if (IndirectBranch) {
+ llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
+ if (PN->getNumIncomingValues() == 0) {
+ PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
+ PN->eraseFromParent();
+ }
+ }
+
+ EmitIfUsed(*this, RethrowBlock.getBlock());
+ EmitIfUsed(*this, TerminateLandingPad);
+ EmitIfUsed(*this, TerminateHandler);
+ EmitIfUsed(*this, UnreachableBlock);
+
+ if (CGM.getCodeGenOpts().EmitDeclMetadata)
+ EmitDeclMetadata();
+}
+
+/// ShouldInstrumentFunction - Return true if the current function should be
+/// instrumented with __cyg_profile_func_* calls
+bool CodeGenFunction::ShouldInstrumentFunction() {
+ if (!CGM.getCodeGenOpts().InstrumentFunctions)
+ return false;
+ if (CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
+ return false;
+ return true;
+}
+
+/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
+/// instrumentation function with the current function and the call site, if
+/// function instrumentation is enabled.
+void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
+ if (!ShouldInstrumentFunction())
+ return;
+
+ const llvm::PointerType *PointerTy;
+ const llvm::FunctionType *FunctionTy;
+ std::vector<const llvm::Type*> ProfileFuncArgs;
+
+ // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
+ PointerTy = llvm::Type::getInt8PtrTy(VMContext);
+ ProfileFuncArgs.push_back(PointerTy);
+ ProfileFuncArgs.push_back(PointerTy);
+ FunctionTy = llvm::FunctionType::get(
+ llvm::Type::getVoidTy(VMContext),
+ ProfileFuncArgs, false);
+
+ llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
+ llvm::CallInst *CallSite = Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::returnaddress, 0, 0),
+ llvm::ConstantInt::get(Int32Ty, 0),
+ "callsite");
+
+ Builder.CreateCall2(F,
+ llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
+ CallSite);
+}
+
+void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
+ llvm::Function *Fn,
+ const FunctionArgList &Args,
+ SourceLocation StartLoc) {
+ const Decl *D = GD.getDecl();
+
+ DidCallStackSave = false;
+ CurCodeDecl = CurFuncDecl = D;
+ FnRetTy = RetTy;
+ CurFn = Fn;
+ assert(CurFn->isDeclaration() && "Function already has body?");
+
+ // Pass inline keyword to optimizer if it appears explicitly on any
+ // declaration.
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+ for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
+ RE = FD->redecls_end(); RI != RE; ++RI)
+ if (RI->isInlineSpecified()) {
+ Fn->addFnAttr(llvm::Attribute::InlineHint);
+ break;
+ }
+
+ llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
+
+ // Create a marker to make it easy to insert allocas into the entry block
+ // later. Don't create this with the builder, because we don't want it
+ // folded.
+ llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
+ AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
+ if (Builder.isNamePreserving())
+ AllocaInsertPt->setName("allocapt");
+
+ ReturnBlock = getJumpDestInCurrentScope("return");
+
+ Builder.SetInsertPoint(EntryBB);
+
+ QualType FnType = getContext().getFunctionType(RetTy, 0, 0, false, 0,
+ false, false, 0, 0,
+ /*FIXME?*/
+ FunctionType::ExtInfo());
+
+ // Emit subprogram debug descriptor.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(StartLoc);
+ DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
+ }
+
+ EmitFunctionInstrumentation("__cyg_profile_func_enter");
+
+ // FIXME: Leaked.
+ // CC info is ignored, hopefully?
+ CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args,
+ FunctionType::ExtInfo());
+
+ if (RetTy->isVoidType()) {
+ // Void type; nothing to return.
+ ReturnValue = 0;
+ } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+ hasAggregateLLVMType(CurFnInfo->getReturnType())) {
+ // Indirect aggregate return; emit returned value directly into sret slot.
+ // This reduces code size, and affects correctness in C++.
+ ReturnValue = CurFn->arg_begin();
+ } else {
+ ReturnValue = CreateIRTemp(RetTy, "retval");
+ }
+
+ EmitStartEHSpec(CurCodeDecl);
+ EmitFunctionProlog(*CurFnInfo, CurFn, Args);
+
+ if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance())
+ CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
+
+ // If any of the arguments have a variably modified type, make sure to
+ // emit the type size.
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i) {
+ QualType Ty = i->second;
+
+ if (Ty->isVariablyModifiedType())
+ EmitVLASize(Ty);
+ }
+}
+
+void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
+ const FunctionDecl *FD = cast<FunctionDecl>(CurGD.getDecl());
+ assert(FD->getBody());
+ EmitStmt(FD->getBody());
+}
+
+/// Tries to mark the given function nounwind based on the
+/// non-existence of any throwing calls within it. We believe this is
+/// lightweight enough to do at -O0.
+static void TryMarkNoThrow(llvm::Function *F) {
+ // LLVM treats 'nounwind' on a function as part of the type, so we
+ // can't do this on functions that can be overridden.
+ if (F->mayBeOverridden()) return;
+
+ for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
+ for (llvm::BasicBlock::iterator
+ BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
+ if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI))
+ if (!Call->doesNotThrow())
+ return;
+ F->setDoesNotThrow(true);
+}
+
+void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+
+ // Check if we should generate debug info for this function.
+ if (CGM.getDebugInfo() && !FD->hasAttr<NoDebugAttr>())
+ DebugInfo = CGM.getDebugInfo();
+
+ FunctionArgList Args;
+ QualType ResTy = FD->getResultType();
+
+ CurGD = GD;
+ if (isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isInstance())
+ CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResTy, Args);
+
+ if (FD->getNumParams()) {
+ const FunctionProtoType* FProto = FD->getType()->getAs<FunctionProtoType>();
+ assert(FProto && "Function def must have prototype!");
+
+ for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
+ Args.push_back(std::make_pair(FD->getParamDecl(i),
+ FProto->getArgType(i)));
+ }
+
+ SourceRange BodyRange;
+ if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
+
+ // Emit the standard function prologue.
+ StartFunction(GD, ResTy, Fn, Args, BodyRange.getBegin());
+
+ // Generate the body of the function.
+ if (isa<CXXDestructorDecl>(FD))
+ EmitDestructorBody(Args);
+ else if (isa<CXXConstructorDecl>(FD))
+ EmitConstructorBody(Args);
+ else
+ EmitFunctionBody(Args);
+
+ // Emit the standard function epilogue.
+ FinishFunction(BodyRange.getEnd());
+
+ // If we haven't marked the function nothrow through other means, do
+ // a quick pass now to see if we can.
+ if (!CurFn->doesNotThrow())
+ TryMarkNoThrow(CurFn);
+}
+
+/// ContainsLabel - Return true if the statement contains a label in it. If
+/// this statement is not executed normally, then the absence of any label in
+/// it means that we can just remove the code.
+bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
+ // Null statement, not a label!
+ if (S == 0) return false;
+
+ // If this is a label, we have to emit the code, consider something like:
+ // if (0) { ... foo: bar(); } goto foo;
+ if (isa<LabelStmt>(S))
+ return true;
+
+ // If this is a case/default statement, and we haven't seen a switch, we have
+ // to emit the code.
+ if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
+ return true;
+
+ // If this is a switch statement, we want to ignore cases below it.
+ if (isa<SwitchStmt>(S))
+ IgnoreCaseStmts = true;
+
+ // Scan subexpressions for verboten labels.
+ for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+ I != E; ++I)
+ if (ContainsLabel(*I, IgnoreCaseStmts))
+ return true;
+
+ return false;
+}
+
+
+/// ConstantFoldsToSimpleInteger - If the specified expression does not fold to
+/// a constant, or if it does but contains a label, return 0. If it constant
+/// folds to 'true' and does not contain a label, return 1; if it constant
+/// folds to 'false' and does not contain a label, return -1.
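+/// For example, "2 > 1" folds to 1, "0" folds to -1, and a non-constant
+/// condition such as "x > 0" returns 0.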
+int CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond) {
+ // FIXME: Rename and handle conversion of other evaluatable things
+ // to bool.
+ Expr::EvalResult Result;
+ if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
+ Result.HasSideEffects)
+ return 0; // Not foldable, not integer or not fully evaluatable.
+
+ if (CodeGenFunction::ContainsLabel(Cond))
+ return 0; // Contains a label.
+
+ return Result.Val.getInt().getBoolValue() ? 1 : -1;
+}
+
+
+/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
+/// statement) to the specified blocks. Based on the condition, this might try
+/// to simplify the codegen of the conditional based on the branch.
+///
+void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
+ llvm::BasicBlock *TrueBlock,
+ llvm::BasicBlock *FalseBlock) {
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond))
+ return EmitBranchOnBoolExpr(PE->getSubExpr(), TrueBlock, FalseBlock);
+
+ if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
+ // Handle X && Y in a condition.
+ if (CondBOp->getOpcode() == BO_LAnd) {
+ // If we have "1 && X", simplify the code. "0 && X" would have constant
+ // folded if the case was simple enough.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == 1) {
+ // br(1 && X) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ }
+
+ // If we have "X && 1", simplify the code to use an uncond branch.
+ // "X && 0" would have been constant folded to 0.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == 1) {
+ // br(X && 1) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
+ }
+
+ // Emit the LHS as a conditional. If the LHS conditional is false, we
+ // want to jump to the FalseBlock.
+ llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
+ EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
+ EmitBlock(LHSTrue);
+
+ // Any temporaries created here are conditional.
+ BeginConditionalBranch();
+ EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ EndConditionalBranch();
+
+ return;
+ } else if (CondBOp->getOpcode() == BO_LOr) {
+ // If we have "0 || X", simplify the code. "1 || X" would have constant
+ // folded if the case was simple enough.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == -1) {
+ // br(0 || X) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ }
+
+ // If we have "X || 0", simplify the code to use an uncond branch.
+ // "X || 1" would have been constant folded to 1.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == -1) {
+ // br(X || 0) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
+ }
+
+ // Emit the LHS as a conditional. If the LHS conditional is true, we
+ // want to jump to the TrueBlock.
+ llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
+ EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
+ EmitBlock(LHSFalse);
+
+ // Any temporaries created here are conditional.
+ BeginConditionalBranch();
+ EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ EndConditionalBranch();
+
+ return;
+ }
+ }
+
+ if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
+ // br(!x, t, f) -> br(x, f, t)
+ if (CondUOp->getOpcode() == UO_LNot)
+ return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
+ }
+
+ if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
+ // Handle ?: operator.
+
+ // Just ignore GNU ?: extension.
+ if (CondOp->getLHS()) {
+ // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
+ llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
+ EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
+ EmitBlock(LHSBlock);
+ EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
+ EmitBlock(RHSBlock);
+ EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
+ return;
+ }
+ }
+
+ // Emit the code with the fully general case.
+ llvm::Value *CondV = EvaluateExprAsBool(Cond);
+ Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
+}
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified stmt yet.
+void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError) {
+ CGM.ErrorUnsupported(S, Type, OmitOnError);
+}
+
+void
+CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
+ // Ignore empty classes in C++.
+ if (getContext().getLangOptions().CPlusPlus) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
+ return;
+ }
+ }
+
+ // Cast the dest ptr to the appropriate i8 pointer type.
+ unsigned DestAS =
+ cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+ const llvm::Type *BP =
+ llvm::Type::getInt8PtrTy(VMContext, DestAS);
+ if (DestPtr->getType() != BP)
+ DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
+
+ // Get size and alignment info for this aggregate.
+ std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
+ uint64_t Size = TypeInfo.first;
+ unsigned Align = TypeInfo.second;
+
+ // Don't bother emitting a zero-byte memset.
+ if (Size == 0)
+ return;
+
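+ // TypeInfo reports size and alignment in bits; the memset/memcpy intrinsics
+ // below expect bytes, hence the division by 8.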
+ llvm::ConstantInt *SizeVal = llvm::ConstantInt::get(IntPtrTy, Size / 8);
+ llvm::ConstantInt *AlignVal = Builder.getInt32(Align / 8);
+
+ // If the type contains a pointer to data member we can't memset it to zero.
+ // Instead, create a null constant and copy it to the destination.
+ if (!CGM.getTypes().isZeroInitializable(Ty)) {
+ llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
+
+ llvm::GlobalVariable *NullVariable =
+ new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
+ /*isConstant=*/true,
+ llvm::GlobalVariable::PrivateLinkage,
+ NullConstant, llvm::Twine());
+ llvm::Value *SrcPtr =
+ Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());
+
+ // FIXME: variable-size types?
+
+ // Get and call the appropriate llvm.memcpy overload.
+ llvm::Constant *Memcpy =
+ CGM.getMemCpyFn(DestPtr->getType(), SrcPtr->getType(), IntPtrTy);
+ Builder.CreateCall5(Memcpy, DestPtr, SrcPtr, SizeVal, AlignVal,
+ /*volatile*/ Builder.getFalse());
+ return;
+ }
+
+ // Otherwise, just memset the whole thing to zero. This is legal
+ // because in LLVM, all default initializers (other than the ones we just
+ // handled above) are guaranteed to have a bit pattern of all zeros.
+
+ // FIXME: Handle variable sized types.
+ Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtrTy), DestPtr,
+ Builder.getInt8(0),
+ SizeVal, AlignVal, /*volatile*/ Builder.getFalse());
+}
+
+llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelStmt *L) {
+ // Make sure that there is a block for the indirect goto.
+ if (IndirectBranch == 0)
+ GetIndirectGotoBlock();
+
+ llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
+
+ // Make sure the indirect branch includes all of the address-taken blocks.
+ IndirectBranch->addDestination(BB);
+ return llvm::BlockAddress::get(CurFn, BB);
+}
+
+llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
+ // If we already made the indirect branch for indirect goto, return its block.
+ if (IndirectBranch) return IndirectBranch->getParent();
+
+ CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+
+ // Create the PHI node that indirect gotos will add entries to.
+ llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, "indirect.goto.dest");
+
+ // Create the indirect branch instruction.
+ IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
+ return IndirectBranch->getParent();
+}
+
+llvm::Value *CodeGenFunction::GetVLASize(const VariableArrayType *VAT) {
+ llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];
+
+ assert(SizeEntry && "Did not emit size for type");
+ return SizeEntry;
+}
+
+llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
+ assert(Ty->isVariablyModifiedType() &&
+ "Must pass variably modified type to EmitVLASizes!");
+
+ EnsureInsertPoint();
+
+ if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
+ llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];
+
+ if (!SizeEntry) {
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+
+ // Get the element size.
+ QualType ElemTy = VAT->getElementType();
+ llvm::Value *ElemSize;
+ if (ElemTy->isVariableArrayType())
+ ElemSize = EmitVLASize(ElemTy);
+ else
+ ElemSize = llvm::ConstantInt::get(SizeTy,
+ getContext().getTypeSizeInChars(ElemTy).getQuantity());
+
+ llvm::Value *NumElements = EmitScalarExpr(VAT->getSizeExpr());
+ NumElements = Builder.CreateIntCast(NumElements, SizeTy, false, "tmp");
+
+ SizeEntry = Builder.CreateMul(ElemSize, NumElements);
+ }
+
+ return SizeEntry;
+ }
+
+ if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
+ EmitVLASize(AT->getElementType());
+ return 0;
+ }
+
+ const PointerType *PT = Ty->getAs<PointerType>();
+ assert(PT && "unknown VM type!");
+ EmitVLASize(PT->getPointeeType());
+ return 0;
+}
+
+llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
+ if (CGM.getContext().getBuiltinVaListType()->isArrayType())
+ return EmitScalarExpr(E);
+ return EmitLValue(E).getAddress();
+}
+
+/// Pops cleanup blocks until the given savepoint is reached.
+void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
+ assert(Old.isValid());
+
+ while (EHStack.stable_begin() != Old) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
+
+ // As long as Old strictly encloses the scope's enclosing normal
+ // cleanup, we're going to emit another normal cleanup which
+ // fallthrough can propagate through.
+ bool FallThroughIsBranchThrough =
+ Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());
+
+ PopCleanupBlock(FallThroughIsBranchThrough);
+ }
+}
+
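+/// Return the normal-cleanup entry block for the given cleanup scope,
+/// creating it lazily if it does not exist yet.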
+static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
+ EHCleanupScope &Scope) {
+ assert(Scope.isNormalCleanup());
+ llvm::BasicBlock *Entry = Scope.getNormalBlock();
+ if (!Entry) {
+ Entry = CGF.createBasicBlock("cleanup");
+ Scope.setNormalBlock(Entry);
+ }
+ return Entry;
+}
+
+static llvm::BasicBlock *CreateEHEntry(CodeGenFunction &CGF,
+ EHCleanupScope &Scope) {
+ assert(Scope.isEHCleanup());
+ llvm::BasicBlock *Entry = Scope.getEHBlock();
+ if (!Entry) {
+ Entry = CGF.createBasicBlock("eh.cleanup");
+ Scope.setEHBlock(Entry);
+ }
+ return Entry;
+}
+
+/// Transitions the terminator of the given exit-block of a cleanup to
+/// be a cleanup switch.
+static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
+ llvm::BasicBlock *Block) {
+ // If it's a branch, turn it into a switch whose default
+ // destination is its original target.
+ llvm::TerminatorInst *Term = Block->getTerminator();
+ assert(Term && "can't transition block without terminator");
+
+ if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
+ assert(Br->isUnconditional());
+ llvm::LoadInst *Load =
+ new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term);
+ llvm::SwitchInst *Switch =
+ llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
+ Br->eraseFromParent();
+ return Switch;
+ } else {
+ return cast<llvm::SwitchInst>(Term);
+ }
+}
+
+/// Attempts to reduce a cleanup's entry block to a fallthrough. This
+/// is basically llvm::MergeBlockIntoPredecessor, except
+/// simplified/optimized for the tighter constraints on cleanup blocks.
+///
+/// Returns the new block, whatever it is.
+static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
+ llvm::BasicBlock *Entry) {
+ llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
+ if (!Pred) return Entry;
+
+ llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
+ if (!Br || Br->isConditional()) return Entry;
+ assert(Br->getSuccessor(0) == Entry);
+
+ // If we were previously inserting at the end of the cleanup entry
+ // block, we'll need to continue inserting at the end of the
+ // predecessor.
+ bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
+ assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());
+
+ // Kill the branch.
+ Br->eraseFromParent();
+
+ // Merge the blocks.
+ Pred->getInstList().splice(Pred->end(), Entry->getInstList());
+
+ // Kill the entry block.
+ Entry->eraseFromParent();
+
+ if (WasInsertBlock)
+ CGF.Builder.SetInsertPoint(Pred);
+
+ return Pred;
+}
+
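+/// Emit a single cleanup body. EH cleanups are emitted inside a terminate
+/// scope, so exceptions escaping the cleanup reach the terminate handler.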
+static void EmitCleanup(CodeGenFunction &CGF,
+ EHScopeStack::Cleanup *Fn,
+ bool ForEH) {
+ if (ForEH) CGF.EHStack.pushTerminate();
+ Fn->Emit(CGF, ForEH);
+ if (ForEH) CGF.EHStack.popTerminate();
+ assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
+}
+
+/// Pops a cleanup block. If the block includes a normal cleanup, the
+/// current insertion point is threaded through the cleanup, as are
+/// any branch fixups on the cleanup.
+void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
+ assert(!EHStack.empty() && "cleanup stack is empty!");
+ assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
+ assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());
+ assert(Scope.isActive() && "cleanup was still inactive when popped!");
+
+ // Check whether we need an EH cleanup. This is only true if we've
+ // generated a lazy EH cleanup block.
+ bool RequiresEHCleanup = Scope.hasEHBranches();
+
+ // Check the three conditions which might require a normal cleanup:
+
+ // - whether there are branch fix-ups through this cleanup
+ unsigned FixupDepth = Scope.getFixupDepth();
+ bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;
+
+ // - whether there are branch-throughs or branch-afters
+ bool HasExistingBranches = Scope.hasBranches();
+
+ // - whether there's a fallthrough
+ llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
+ bool HasFallthrough = (FallthroughSource != 0);
+
+ bool RequiresNormalCleanup = false;
+ if (Scope.isNormalCleanup() &&
+ (HasFixups || HasExistingBranches || HasFallthrough)) {
+ RequiresNormalCleanup = true;
+ }
+
+ // If we don't need the cleanup at all, we're done.
+ if (!RequiresNormalCleanup && !RequiresEHCleanup) {
+ EHStack.popCleanup(); // safe because there are no fixups
+ assert(EHStack.getNumBranchFixups() == 0 ||
+ EHStack.hasNormalCleanups());
+ return;
+ }
+
+ // Copy the cleanup emission data out. Note that SmallVector
+ // guarantees maximal alignment for its buffer regardless of its
+ // type parameter.
+ llvm::SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
+ CleanupBuffer.reserve(Scope.getCleanupSize());
+ memcpy(CleanupBuffer.data(),
+ Scope.getCleanupBuffer(), Scope.getCleanupSize());
+ CleanupBuffer.set_size(Scope.getCleanupSize());
+ EHScopeStack::Cleanup *Fn =
+ reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data());
+
+ // We want to emit the EH cleanup after the normal cleanup, but go
+ // ahead and do the setup for the EH cleanup while the scope is still
+ // alive.
+ llvm::BasicBlock *EHEntry = 0;
+ llvm::SmallVector<llvm::Instruction*, 2> EHInstsToAppend;
+ if (RequiresEHCleanup) {
+ EHEntry = CreateEHEntry(*this, Scope);
+
+ // Figure out the branch-through dest if necessary.
+ llvm::BasicBlock *EHBranchThroughDest = 0;
+ if (Scope.hasEHBranchThroughs()) {
+ assert(Scope.getEnclosingEHCleanup() != EHStack.stable_end());
+ EHScope &S = *EHStack.find(Scope.getEnclosingEHCleanup());
+ EHBranchThroughDest = CreateEHEntry(*this, cast<EHCleanupScope>(S));
+ }
+
+ // If we have exactly one branch-after and no branch-throughs, we
+ // can dispatch it without a switch.
+ if (!Scope.hasEHBranchThroughs() &&
+ Scope.getNumEHBranchAfters() == 1) {
+ assert(!EHBranchThroughDest);
+
+ // TODO: remove the spurious eh.cleanup.dest stores if this edge
+ // never went through any switches.
+ llvm::BasicBlock *BranchAfterDest = Scope.getEHBranchAfterBlock(0);
+ EHInstsToAppend.push_back(llvm::BranchInst::Create(BranchAfterDest));
+
+ // Otherwise, if we have any branch-afters, we need a switch.
+ } else if (Scope.getNumEHBranchAfters()) {
+ // The default of the switch belongs to the branch-throughs if
+ // they exist.
+ llvm::BasicBlock *Default =
+ (EHBranchThroughDest ? EHBranchThroughDest : getUnreachableBlock());
+
+ const unsigned SwitchCapacity = Scope.getNumEHBranchAfters();
+
+ llvm::LoadInst *Load =
+ new llvm::LoadInst(getEHCleanupDestSlot(), "cleanup.dest");
+ llvm::SwitchInst *Switch =
+ llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
+
+ EHInstsToAppend.push_back(Load);
+ EHInstsToAppend.push_back(Switch);
+
+ for (unsigned I = 0, E = Scope.getNumEHBranchAfters(); I != E; ++I)
+ Switch->addCase(Scope.getEHBranchAfterIndex(I),
+ Scope.getEHBranchAfterBlock(I));
+
+ // Otherwise, we have only branch-throughs; jump to the next EH
+ // cleanup.
+ } else {
+ assert(EHBranchThroughDest);
+ EHInstsToAppend.push_back(llvm::BranchInst::Create(EHBranchThroughDest));
+ }
+ }
+
+ if (!RequiresNormalCleanup) {
+ EHStack.popCleanup();
+ } else {
+ // As a kind of crazy internal case, branch-through fall-throughs
+ // leave the insertion point set to the end of the last cleanup.
+ bool HasPrebranchedFallthrough =
+ (HasFallthrough && FallthroughSource->getTerminator());
+ assert(!HasPrebranchedFallthrough ||
+ FallthroughSource->getTerminator()->getSuccessor(0)
+ == Scope.getNormalBlock());
+
+ // If we have a fallthrough and no other need for the cleanup,
+ // emit it directly.
+ if (HasFallthrough && !HasPrebranchedFallthrough &&
+ !HasFixups && !HasExistingBranches) {
+
+ // Fixups can cause us to optimistically create a normal block,
+ // only to later have no real uses for it. Just delete it in
+ // this case.
+ // TODO: we can potentially simplify all the uses after this.
+ if (Scope.getNormalBlock()) {
+ Scope.getNormalBlock()->replaceAllUsesWith(getUnreachableBlock());
+ delete Scope.getNormalBlock();
+ }
+
+ EHStack.popCleanup();
+
+ EmitCleanup(*this, Fn, /*ForEH*/ false);
+
+ // Otherwise, the best approach is to thread everything through
+ // the cleanup block and then try to clean up after ourselves.
+ } else {
+ // Force the entry block to exist.
+ llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);
+
+ // If there's a fallthrough, we need to store the cleanup
+ // destination index. For fall-throughs this is always zero.
+ if (HasFallthrough && !HasPrebranchedFallthrough)
+ Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());
+
+ // Emit the entry block. This implicitly branches to it if we
+ // have fallthrough. All the fixups and existing branches should
+ // already be branched to it.
+ EmitBlock(NormalEntry);
+
+ bool HasEnclosingCleanups =
+ (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());
+
+ // Compute the branch-through dest if we need it:
+ // - if there are branch-throughs threaded through the scope
+ // - if fall-through is a branch-through
+ // - if there are fixups that will be optimistically forwarded
+ // to the enclosing cleanup
+ llvm::BasicBlock *BranchThroughDest = 0;
+ if (Scope.hasBranchThroughs() ||
+ (HasFallthrough && FallthroughIsBranchThrough) ||
+ (HasFixups && HasEnclosingCleanups)) {
+ assert(HasEnclosingCleanups);
+ EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
+ BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
+ }
+
+ llvm::BasicBlock *FallthroughDest = 0;
+ llvm::SmallVector<llvm::Instruction*, 2> InstsToAppend;
+
+ // If there's exactly one branch-after and no other threads,
+ // we can route it without a switch.
+ if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
+ Scope.getNumBranchAfters() == 1) {
+ assert(!BranchThroughDest);
+
+ // TODO: clean up the possibly dead stores to the cleanup dest slot.
+ llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
+ InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));
+
+ // Build a switch-out if we need it:
+ // - if there are branch-afters threaded through the scope
+ // - if fall-through is a branch-after
+ // - if there are fixups that have nowhere left to go and
+ // so must be immediately resolved
+ } else if (Scope.getNumBranchAfters() ||
+ (HasFallthrough && !FallthroughIsBranchThrough) ||
+ (HasFixups && !HasEnclosingCleanups)) {
+
+ llvm::BasicBlock *Default =
+ (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());
+
+ // TODO: base this on the number of branch-afters and fixups
+ const unsigned SwitchCapacity = 10;
+
+ llvm::LoadInst *Load =
+ new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
+ llvm::SwitchInst *Switch =
+ llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
+
+ InstsToAppend.push_back(Load);
+ InstsToAppend.push_back(Switch);
+
+ // Branch-after fallthrough.
+ if (HasFallthrough && !FallthroughIsBranchThrough) {
+ FallthroughDest = createBasicBlock("cleanup.cont");
+ Switch->addCase(Builder.getInt32(0), FallthroughDest);
+ }
+
+ for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
+ Switch->addCase(Scope.getBranchAfterIndex(I),
+ Scope.getBranchAfterBlock(I));
+ }
+
+ if (HasFixups && !HasEnclosingCleanups)
+ ResolveAllBranchFixups(Switch);
+ } else {
+ // We should always have a branch-through destination in this case.
+ assert(BranchThroughDest);
+ InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
+ }
+
+ // We're finally ready to pop the cleanup.
+ EHStack.popCleanup();
+ assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);
+
+ EmitCleanup(*this, Fn, /*ForEH*/ false);
+
+ // Append the prepared cleanup prologue from above.
+ llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
+ for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
+ NormalExit->getInstList().push_back(InstsToAppend[I]);
+
+ // Optimistically hope that any fixups will continue falling through.
+ for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
+ I < E; ++I) {
+ BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I);
+ if (!Fixup.Destination) continue;
+ if (!Fixup.OptimisticBranchBlock) {
+ new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
+ getNormalCleanupDestSlot(),
+ Fixup.InitialBranch);
+ Fixup.InitialBranch->setSuccessor(0, NormalEntry);
+ }
+ Fixup.OptimisticBranchBlock = NormalExit;
+ }
+
+ if (FallthroughDest)
+ EmitBlock(FallthroughDest);
+ else if (!HasFallthrough)
+ Builder.ClearInsertionPoint();
+
+ // Check whether we can merge NormalEntry into a single predecessor.
+ // This might invalidate (non-IR) pointers to NormalEntry.
+ llvm::BasicBlock *NewNormalEntry =
+ SimplifyCleanupEntry(*this, NormalEntry);
+
+ // If it did invalidate those pointers, and NormalEntry was the same
+ // as NormalExit, go back and patch up the fixups.
+ if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
+ for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
+ I < E; ++I)
+ CGF.EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
+ }
+ }
+
+ assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);
+
+ // Emit the EH cleanup if required.
+ if (RequiresEHCleanup) {
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ EmitBlock(EHEntry);
+ EmitCleanup(*this, Fn, /*ForEH*/ true);
+
+ // Append the prepared cleanup prologue from above.
+ llvm::BasicBlock *EHExit = Builder.GetInsertBlock();
+ for (unsigned I = 0, E = EHInstsToAppend.size(); I != E; ++I)
+ EHExit->getInstList().push_back(EHInstsToAppend[I]);
+
+ Builder.restoreIP(SavedIP);
+
+ SimplifyCleanupEntry(*this, EHEntry);
+ }
+}
+
+/// Terminate the current block by emitting a branch which might leave
+/// the current cleanup-protected scope. The target scope may not yet
+/// be known, in which case this will require a fixup.
+///
+/// As a side-effect, this method clears the insertion point.
+void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
+ assert(Dest.getScopeDepth().encloses(EHStack.getInnermostNormalCleanup())
+ && "stale jump destination");
+
+ if (!HaveInsertPoint())
+ return;
+
+ // Create the branch.
+ llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());
+
+ // Calculate the innermost active normal cleanup.
+ EHScopeStack::stable_iterator
+ TopCleanup = EHStack.getInnermostActiveNormalCleanup();
+
+ // If we're not in an active normal cleanup scope, or if the
+ // destination scope is within the innermost active normal cleanup
+ // scope, we don't need to worry about fixups.
+ if (TopCleanup == EHStack.stable_end() ||
+ TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid
+ Builder.ClearInsertionPoint();
+ return;
+ }
+
+ // If we can't resolve the destination cleanup scope, just add this
+ // to the current cleanup scope as a branch fixup.
+ if (!Dest.getScopeDepth().isValid()) {
+ BranchFixup &Fixup = EHStack.addBranchFixup();
+ Fixup.Destination = Dest.getBlock();
+ Fixup.DestinationIndex = Dest.getDestIndex();
+ Fixup.InitialBranch = BI;
+ Fixup.OptimisticBranchBlock = 0;
+
+ Builder.ClearInsertionPoint();
+ return;
+ }
+
+ // Otherwise, thread through all the normal cleanups in scope.
+
+ // Store the index at the start.
+ llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
+ new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI);
+
+ // Adjust BI to point to the first cleanup block.
+ {
+ EHCleanupScope &Scope =
+ cast<EHCleanupScope>(*EHStack.find(TopCleanup));
+ BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
+ }
+
+ // Add this destination to all the scopes involved.
+ EHScopeStack::stable_iterator I = TopCleanup;
+ EHScopeStack::stable_iterator E = Dest.getScopeDepth();
+ if (E.strictlyEncloses(I)) {
+ while (true) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
+ assert(Scope.isNormalCleanup());
+ I = Scope.getEnclosingNormalCleanup();
+
+ // If this is the last cleanup we're propagating through, tell it
+ // that there's a resolved jump moving through it.
+ if (!E.strictlyEncloses(I)) {
+ Scope.addBranchAfter(Index, Dest.getBlock());
+ break;
+ }
+
+ // Otherwise, tell the scope that there's a jump propagating
+ // through it. If this isn't new information, all the rest of
+ // the work has been done before.
+ if (!Scope.addBranchThrough(Dest.getBlock()))
+ break;
+ }
+ }
+
+ Builder.ClearInsertionPoint();
+}
+
+void CodeGenFunction::EmitBranchThroughEHCleanup(UnwindDest Dest) {
+ // We should never get invalid scope depths for an UnwindDest; that
+ // implies that the destination wasn't set up correctly.
+ assert(Dest.getScopeDepth().isValid() && "invalid scope depth on EH dest?");
+
+ if (!HaveInsertPoint())
+ return;
+
+ // Create the branch.
+ llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());
+
+ // Calculate the innermost active cleanup.
+ EHScopeStack::stable_iterator
+ InnermostCleanup = EHStack.getInnermostActiveEHCleanup();
+
+ // If the destination is in the same EH cleanup scope as us, we
+ // don't need to thread through anything.
+ if (InnermostCleanup.encloses(Dest.getScopeDepth())) {
+ Builder.ClearInsertionPoint();
+ return;
+ }
+ assert(InnermostCleanup != EHStack.stable_end());
+
+ // Store the index at the start.
+ llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
+ new llvm::StoreInst(Index, getEHCleanupDestSlot(), BI);
+
+ // Adjust BI to point to the first cleanup block.
+ {
+ EHCleanupScope &Scope =
+ cast<EHCleanupScope>(*EHStack.find(InnermostCleanup));
+ BI->setSuccessor(0, CreateEHEntry(*this, Scope));
+ }
+
+ // Add this destination to all the scopes involved.
+ for (EHScopeStack::stable_iterator
+ I = InnermostCleanup, E = Dest.getScopeDepth(); ; ) {
+ assert(E.strictlyEncloses(I));
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
+ assert(Scope.isEHCleanup());
+ I = Scope.getEnclosingEHCleanup();
+
+ // If this is the last cleanup we're propagating through, add this
+ // as a branch-after.
+ if (I == E) {
+ Scope.addEHBranchAfter(Index, Dest.getBlock());
+ break;
+ }
+
+ // Otherwise, add it as a branch-through. If this isn't new
+ // information, all the rest of the work has been done before.
+ if (!Scope.addEHBranchThrough(Dest.getBlock()))
+ break;
+ }
+
+ Builder.ClearInsertionPoint();
+}
+
+/// All the branch fixups on the EH stack have propagated out past the
+/// outermost normal cleanup; resolve them all by adding cases to the
+/// given switch instruction.
+void CodeGenFunction::ResolveAllBranchFixups(llvm::SwitchInst *Switch) {
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;
+
+ for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
+ // Skip this fixup if its destination isn't set or if we've
+ // already treated it.
+ BranchFixup &Fixup = EHStack.getBranchFixup(I);
+ if (Fixup.Destination == 0) continue;
+ if (!CasesAdded.insert(Fixup.Destination)) continue;
+
+ Switch->addCase(Builder.getInt32(Fixup.DestinationIndex),
+ Fixup.Destination);
+ }
+
+ EHStack.clearFixups();
+}
+
+void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
+ assert(Block && "resolving a null target block");
+ if (!EHStack.getNumBranchFixups()) return;
+
+ assert(EHStack.hasNormalCleanups() &&
+ "branch fixups exist with no normal cleanups on stack");
+
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
+ bool ResolvedAny = false;
+
+ for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
+ // Skip this fixup if its destination doesn't match.
+ BranchFixup &Fixup = EHStack.getBranchFixup(I);
+ if (Fixup.Destination != Block) continue;
+
+ Fixup.Destination = 0;
+ ResolvedAny = true;
+
+ // If it doesn't have an optimistic branch block, InitialBranch is
+ // already pointing to the right place.
+ llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
+ if (!BranchBB)
+ continue;
+
+ // Don't process the same optimistic branch block twice.
+ if (!ModifiedOptimisticBlocks.insert(BranchBB))
+ continue;
+
+ llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);
+
+ // Add a case to the switch.
+ Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
+ }
+
+ if (ResolvedAny)
+ EHStack.popNullFixups();
+}
+
+/// Activate a cleanup that was created in an inactivated state.
+void CodeGenFunction::ActivateCleanup(EHScopeStack::stable_iterator C) {
+ assert(C != EHStack.stable_end() && "activating bottom of stack?");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
+ assert(!Scope.isActive() && "double activation");
+
+ // Calculate whether the cleanup was used:
+ bool Used = false;
+
+ // - as a normal cleanup
+ if (Scope.isNormalCleanup()) {
+ bool NormalUsed = false;
+ if (Scope.getNormalBlock()) {
+ NormalUsed = true;
+ } else {
+ // Check whether any enclosed cleanups were needed.
+ for (EHScopeStack::stable_iterator
+ I = EHStack.getInnermostNormalCleanup(); I != C; ) {
+ assert(C.strictlyEncloses(I));
+ EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
+ if (S.getNormalBlock()) {
+ NormalUsed = true;
+ break;
+ }
+ I = S.getEnclosingNormalCleanup();
+ }
+ }
+
+ if (NormalUsed)
+ Used = true;
+ else
+ Scope.setActivatedBeforeNormalUse(true);
+ }
+
+ // - as an EH cleanup
+ if (Scope.isEHCleanup()) {
+ bool EHUsed = false;
+ if (Scope.getEHBlock()) {
+ EHUsed = true;
+ } else {
+ // Check whether any enclosed cleanups were needed.
+ for (EHScopeStack::stable_iterator
+ I = EHStack.getInnermostEHCleanup(); I != C; ) {
+ assert(C.strictlyEncloses(I));
+ EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
+ if (S.getEHBlock()) {
+ EHUsed = true;
+ break;
+ }
+ I = S.getEnclosingEHCleanup();
+ }
+ }
+
+ if (EHUsed)
+ Used = true;
+ else
+ Scope.setActivatedBeforeEHUse(true);
+ }
+
+ llvm::AllocaInst *Var = EHCleanupScope::activeSentinel();
+ if (Used) {
+ Var = CreateTempAlloca(Builder.getInt1Ty());
+ InitTempAlloca(Var, Builder.getFalse());
+ }
+ Scope.setActiveVar(Var);
+}
+
+llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
+ if (!NormalCleanupDest)
+ NormalCleanupDest =
+ CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
+ return NormalCleanupDest;
+}
+
+llvm::Value *CodeGenFunction::getEHCleanupDestSlot() {
+ if (!EHCleanupDest)
+ EHCleanupDest =
+ CreateTempAlloca(Builder.getInt32Ty(), "eh.cleanup.dest.slot");
+ return EHCleanupDest;
+}
+
+void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
+ llvm::ConstantInt *Init) {
+ assert (Init && "Invalid DeclRefExpr initializer!");
+ if (CGDebugInfo *Dbg = getDebugInfo())
+ Dbg->EmitGlobalVariable(E->getDecl(), Init, Builder);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
new file mode 100644
index 0000000..4f04205
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
@@ -0,0 +1,1746 @@
+//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal per-function state used for llvm translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENFUNCTION_H
+#define CLANG_CODEGEN_CODEGENFUNCTION_H
+
+#include "clang/AST/Type.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/ValueHandle.h"
+#include "CodeGenModule.h"
+#include "CGBlocks.h"
+#include "CGBuilder.h"
+#include "CGCall.h"
+#include "CGCXX.h"
+#include "CGValue.h"
+
+namespace llvm {
+ class BasicBlock;
+ class LLVMContext;
+ class MDNode;
+ class Module;
+ class SwitchInst;
+ class Twine;
+ class Value;
+ class CallSite;
+}
+
+namespace clang {
+ class APValue;
+ class ASTContext;
+ class CXXDestructorDecl;
+ class CXXTryStmt;
+ class Decl;
+ class EnumConstantDecl;
+ class FunctionDecl;
+ class FunctionProtoType;
+ class LabelStmt;
+ class ObjCContainerDecl;
+ class ObjCInterfaceDecl;
+ class ObjCIvarDecl;
+ class ObjCMethodDecl;
+ class ObjCImplementationDecl;
+ class ObjCPropertyImplDecl;
+ class TargetInfo;
+ class TargetCodeGenInfo;
+ class VarDecl;
+ class ObjCForCollectionStmt;
+ class ObjCAtTryStmt;
+ class ObjCAtThrowStmt;
+ class ObjCAtSynchronizedStmt;
+
+namespace CodeGen {
+ class CodeGenTypes;
+ class CGDebugInfo;
+ class CGFunctionInfo;
+ class CGRecordLayout;
+ class CGBlockInfo;
+ class CGCXXABI;
+
+/// A branch fixup. These are required when emitting a goto to a
+/// label which hasn't been emitted yet. The goto is optimistically
+/// emitted as a branch to the basic block for the label, and (if it
+/// occurs in a scope with non-trivial cleanups) a fixup is added to
+/// the innermost cleanup. When a (normal) cleanup is popped, any
+/// unresolved fixups in that scope are threaded through the cleanup.
+struct BranchFixup {
+ /// The block containing the terminator which needs to be modified
+ /// into a switch if this fixup is resolved into the current scope.
+ /// If null, InitialBranch points directly to the destination.
+ llvm::BasicBlock *OptimisticBranchBlock;
+
+ /// The ultimate destination of the branch.
+ ///
+ /// This can be set to null to indicate that this fixup was
+ /// successfully resolved.
+ llvm::BasicBlock *Destination;
+
+ /// The destination index value.
+ unsigned DestinationIndex;
+
+ /// The initial branch of the fixup.
+ llvm::BranchInst *InitialBranch;
+};
+
+enum CleanupKind {
+ EHCleanup = 0x1,
+ NormalCleanup = 0x2,
+ NormalAndEHCleanup = EHCleanup | NormalCleanup,
+
+ InactiveCleanup = 0x4,
+ InactiveEHCleanup = EHCleanup | InactiveCleanup,
+ InactiveNormalCleanup = NormalCleanup | InactiveCleanup,
+ InactiveNormalAndEHCleanup = NormalAndEHCleanup | InactiveCleanup
+};
+
+/// A stack of scopes which respond to exceptions, including cleanups
+/// and catch blocks.
+class EHScopeStack {
+public:
+ /// A saved depth on the scope stack. This is necessary because
+ /// pushing scopes onto the stack invalidates iterators.
+ class stable_iterator {
+ friend class EHScopeStack;
+
+ /// Offset from StartOfData to EndOfBuffer.
+ ptrdiff_t Size;
+
+ stable_iterator(ptrdiff_t Size) : Size(Size) {}
+
+ public:
+ static stable_iterator invalid() { return stable_iterator(-1); }
+ stable_iterator() : Size(-1) {}
+
+ bool isValid() const { return Size >= 0; }
+
+ /// Returns true if this scope encloses I.
+ /// Returns false if I is invalid.
+ /// This scope must be valid.
+ bool encloses(stable_iterator I) const { return Size <= I.Size; }
+
+ /// Returns true if this scope strictly encloses I: that is,
+ /// if it encloses I and is not I.
+ /// Returns false if I is invalid.
+ /// This scope must be valid.
+ bool strictlyEncloses(stable_iterator I) const { return Size < I.Size; }
+
+ friend bool operator==(stable_iterator A, stable_iterator B) {
+ return A.Size == B.Size;
+ }
+ friend bool operator!=(stable_iterator A, stable_iterator B) {
+ return A.Size != B.Size;
+ }
+ };
+
+ /// Information for lazily generating a cleanup. Subclasses must be
+ /// POD-like: cleanups will not be destructed, and they will be
+ /// allocated on the cleanup stack and freely copied and moved
+ /// around.
+ ///
+ /// Cleanup implementations should generally be declared in an
+ /// anonymous namespace.
+ class Cleanup {
+ public:
+ // Anchor the construction vtable. We use the destructor because
+ // gcc gives an obnoxious warning if there are virtual methods
+ // with an accessible non-virtual destructor. Unfortunately,
+ // declaring this destructor makes it non-trivial, but there
+ // doesn't seem to be any other way around this warning.
+ //
+ // This destructor will never be called.
+ virtual ~Cleanup();
+
+ /// Emit the cleanup. For normal cleanups, this is run in the
+ /// same EH context as when the cleanup was pushed, i.e. the
+ /// immediately-enclosing context of the cleanup scope. For
+ /// EH cleanups, this is run in a terminate context.
+ ///
+ /// \param IsForEHCleanup true if this is for an EH cleanup, false
+ /// if for a normal cleanup.
+ virtual void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) = 0;
+ };
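+
+ // Illustrative sketch (commentary only; the name and captured value are
+ // hypothetical): a minimal Cleanup subclass, declared in an anonymous
+ // namespace in the implementation file:
+ //
+ //   namespace {
+ //     struct CallFreeCleanup : EHScopeStack::Cleanup {
+ //       llvm::Value *Ptr;
+ //       CallFreeCleanup(llvm::Value *Ptr) : Ptr(Ptr) {}
+ //       void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
+ //         // emit the call that releases Ptr here
+ //       }
+ //     };
+ //   }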
+
+private:
+ // The implementation for this class is in CGException.h and
+ // CGException.cpp; the definition is here because it's used as a
+ // member of CodeGenFunction.
+
+ /// The start of the scope-stack buffer, i.e. the allocated pointer
+ /// for the buffer. All of these pointers are either simultaneously
+ /// null or simultaneously valid.
+ char *StartOfBuffer;
+
+ /// The end of the buffer.
+ char *EndOfBuffer;
+
+ /// The first valid entry in the buffer.
+ char *StartOfData;
+
+ /// The innermost normal cleanup on the stack.
+ stable_iterator InnermostNormalCleanup;
+
+ /// The innermost EH cleanup on the stack.
+ stable_iterator InnermostEHCleanup;
+
+ /// The number of catches on the stack.
+ unsigned CatchDepth;
+
+ /// The current EH destination index. Reset to FirstEHDestIndex
+ /// whenever the last EH cleanup is popped.
+ unsigned NextEHDestIndex;
+ enum { FirstEHDestIndex = 1 };
+
+ /// The current set of branch fixups. A branch fixup is a jump to
+ /// an as-yet unemitted label, i.e. a label for which we don't yet
+ /// know the EH stack depth. Whenever we pop a cleanup, we have
+ /// to thread all the current branch fixups through it.
+ ///
+ /// Fixups are recorded as the Use of the respective branch or
+ /// switch statement. The use points to the final destination.
+ /// When popping out of a cleanup, these uses are threaded through
+ /// the cleanup and adjusted to point to the new cleanup.
+ ///
+ /// Note that branches are allowed to jump into protected scopes
+ /// in certain situations; e.g. the following code is legal:
+ /// struct A { ~A(); }; // trivial ctor, non-trivial dtor
+ /// goto foo;
+ /// A a;
+ /// foo:
+ /// bar();
+ llvm::SmallVector<BranchFixup, 8> BranchFixups;
+
+ char *allocate(size_t Size);
+
+ void *pushCleanup(CleanupKind K, size_t DataSize);
+
+public:
+ EHScopeStack() : StartOfBuffer(0), EndOfBuffer(0), StartOfData(0),
+ InnermostNormalCleanup(stable_end()),
+ InnermostEHCleanup(stable_end()),
+ CatchDepth(0), NextEHDestIndex(FirstEHDestIndex) {}
+ ~EHScopeStack() { delete[] StartOfBuffer; }
+
+ // Variadic templates would make this not terrible.
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T>
+ void pushCleanup(CleanupKind Kind) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T();
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0>
+ void pushCleanup(CleanupKind Kind, A0 a0) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T(a0);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1>
+ void pushCleanup(CleanupKind Kind, A0 a0, A1 a1) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T(a0, a1);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1, class A2>
+ void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T(a0, a1, a2);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1, class A2, class A3>
+ void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1, class A2, class A3, class A4>
+ void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3, A4 a4) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3, a4);
+ (void) Obj;
+ }
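+
+ // Illustrative sketch (commentary only, using the hypothetical cleanup
+ // sketched above): pushing a cleanup with one constructor argument:
+ //
+ //   CGF.EHStack.pushCleanup<CallFreeCleanup>(NormalAndEHCleanup, Ptr);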
+
+ /// Pops a cleanup scope off the stack. This should only be called
+ /// by CodeGenFunction::PopCleanupBlock.
+ void popCleanup();
+
+ /// Push a set of catch handlers on the stack. The catch is
+ /// uninitialized and will need to have the given number of handlers
+ /// set on it.
+ class EHCatchScope *pushCatch(unsigned NumHandlers);
+
+ /// Pops a catch scope off the stack.
+ void popCatch();
+
+ /// Push an exceptions filter on the stack.
+ class EHFilterScope *pushFilter(unsigned NumFilters);
+
+ /// Pops an exceptions filter off the stack.
+ void popFilter();
+
+ /// Push a terminate handler on the stack.
+ void pushTerminate();
+
+ /// Pops a terminate handler off the stack.
+ void popTerminate();
+
+ /// Determines whether the exception-scopes stack is empty.
+ bool empty() const { return StartOfData == EndOfBuffer; }
+
+ bool requiresLandingPad() const {
+ return (CatchDepth || hasEHCleanups());
+ }
+
+ /// Determines whether there are any normal cleanups on the stack.
+ bool hasNormalCleanups() const {
+ return InnermostNormalCleanup != stable_end();
+ }
+
+ /// Returns the innermost normal cleanup on the stack, or
+ /// stable_end() if there are no normal cleanups.
+ stable_iterator getInnermostNormalCleanup() const {
+ return InnermostNormalCleanup;
+ }
+ stable_iterator getInnermostActiveNormalCleanup() const; // CGException.h
+
+ /// Determines whether there are any EH cleanups on the stack.
+ bool hasEHCleanups() const {
+ return InnermostEHCleanup != stable_end();
+ }
+
+ /// Returns the innermost EH cleanup on the stack, or stable_end()
+ /// if there are no EH cleanups.
+ stable_iterator getInnermostEHCleanup() const {
+ return InnermostEHCleanup;
+ }
+ stable_iterator getInnermostActiveEHCleanup() const; // CGException.h
+
+ /// An unstable reference to a scope-stack depth. Invalidated by
+ /// pushes but not pops.
+ class iterator;
+
+ /// Returns an iterator pointing to the innermost EH scope.
+ iterator begin() const;
+
+ /// Returns an iterator pointing to the outermost EH scope.
+ iterator end() const;
+
+ /// Create a stable reference to the top of the EH stack. The
+ /// returned reference is valid until that scope is popped off the
+ /// stack.
+ stable_iterator stable_begin() const {
+ return stable_iterator(EndOfBuffer - StartOfData);
+ }
+
+ /// Create a stable reference to the bottom of the EH stack.
+ static stable_iterator stable_end() {
+ return stable_iterator(0);
+ }
+
+ /// Translates an iterator into a stable_iterator.
+ stable_iterator stabilize(iterator it) const;
+
+ /// Finds the nearest cleanup enclosing the given iterator.
+ /// Returns stable_iterator::invalid() if there are no such cleanups.
+ stable_iterator getEnclosingEHCleanup(iterator it) const;
+
+ /// Turn a stable reference to a scope depth into an unstable pointer
+ /// to the EH stack.
+ iterator find(stable_iterator save) const;
+
+ /// Removes the cleanup pointed to by the given stable_iterator.
+ void removeCleanup(stable_iterator save);
+
+ /// Add a branch fixup to the current cleanup scope.
+ BranchFixup &addBranchFixup() {
+ assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
+ BranchFixups.push_back(BranchFixup());
+ return BranchFixups.back();
+ }
+
+ unsigned getNumBranchFixups() const { return BranchFixups.size(); }
+ BranchFixup &getBranchFixup(unsigned I) {
+ assert(I < getNumBranchFixups());
+ return BranchFixups[I];
+ }
+
+ /// Pops lazily-removed fixups from the end of the list. This
+ /// should only be called by procedures which have just popped a
+ /// cleanup or resolved one or more fixups.
+ void popNullFixups();
+
+ /// Clears the branch-fixups list. This should only be called by
+ /// CodeGenFunction::ResolveAllBranchFixups.
+ void clearFixups() { BranchFixups.clear(); }
+
+ /// Gets the next EH destination index.
+ unsigned getNextEHDestIndex() { return NextEHDestIndex++; }
+};
+
+/// CodeGenFunction - This class organizes the per-function state that is used
+/// while generating LLVM code.
+class CodeGenFunction : public BlockFunction {
+ CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT
+ void operator=(const CodeGenFunction&); // DO NOT IMPLEMENT
+
+ friend class CGCXXABI;
+public:
+ /// A jump destination is an abstract label, branching to which may
+ /// require a jump out through normal cleanups.
+ struct JumpDest {
+ JumpDest() : Block(0), ScopeDepth(), Index(0) {}
+ JumpDest(llvm::BasicBlock *Block,
+ EHScopeStack::stable_iterator Depth,
+ unsigned Index)
+ : Block(Block), ScopeDepth(Depth), Index(Index) {}
+
+ bool isValid() const { return Block != 0; }
+ llvm::BasicBlock *getBlock() const { return Block; }
+ EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
+ unsigned getDestIndex() const { return Index; }
+
+ private:
+ llvm::BasicBlock *Block;
+ EHScopeStack::stable_iterator ScopeDepth;
+ unsigned Index;
+ };
+
+ /// An unwind destination is an abstract label, branching to which
+ /// may require a jump out through EH cleanups.
+ struct UnwindDest {
+ UnwindDest() : Block(0), ScopeDepth(), Index(0) {}
+ UnwindDest(llvm::BasicBlock *Block,
+ EHScopeStack::stable_iterator Depth,
+ unsigned Index)
+ : Block(Block), ScopeDepth(Depth), Index(Index) {}
+
+ bool isValid() const { return Block != 0; }
+ llvm::BasicBlock *getBlock() const { return Block; }
+ EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
+ unsigned getDestIndex() const { return Index; }
+
+ private:
+ llvm::BasicBlock *Block;
+ EHScopeStack::stable_iterator ScopeDepth;
+ unsigned Index;
+ };
+
+ CodeGenModule &CGM; // Per-module state.
+ const TargetInfo &Target;
+
+ typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
+ CGBuilderTy Builder;
+
+ /// CurFuncDecl - Holds the Decl for the current function or ObjC method.
+ /// This excludes BlockDecls.
+ const Decl *CurFuncDecl;
+ /// CurCodeDecl - This is the inner-most code context, which includes blocks.
+ const Decl *CurCodeDecl;
+ const CGFunctionInfo *CurFnInfo;
+ QualType FnRetTy;
+ llvm::Function *CurFn;
+
+ /// CurGD - The GlobalDecl for the current function being compiled.
+ GlobalDecl CurGD;
+
+ /// ReturnBlock - Unified return block.
+ JumpDest ReturnBlock;
+
+ /// ReturnValue - The temporary alloca to hold the return value. This is null
+ /// iff the function has no return value.
+ llvm::Value *ReturnValue;
+
+ /// RethrowBlock - Unified rethrow block.
+ UnwindDest RethrowBlock;
+
+ /// AllocaInsertPt - This is an instruction in the entry block before which
+ /// we prefer to insert allocas.
+ llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
+
+ // intptr_t, i32, i64
+ const llvm::IntegerType *IntPtrTy, *Int32Ty, *Int64Ty;
+ uint32_t LLVMPointerWidth;
+
+ bool Exceptions;
+ bool CatchUndefined;
+
+ /// \brief A mapping from NRVO variables to the flags used to indicate
+ /// when the NRVO has been applied to this variable.
+ llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
+
+ EHScopeStack EHStack;
+
+ /// i32s containing the indexes of the cleanup destinations.
+ llvm::AllocaInst *NormalCleanupDest;
+ llvm::AllocaInst *EHCleanupDest;
+
+ unsigned NextCleanupDestIndex;
+
+ /// The exception slot. All landing pads write the current
+ /// exception pointer into this alloca.
+ llvm::Value *ExceptionSlot;
+
+ /// Emits a landing pad for the current EH stack.
+ llvm::BasicBlock *EmitLandingPad();
+
+ llvm::BasicBlock *getInvokeDestImpl();
+
+public:
+ /// ObjCEHValueStack - Stack of Objective-C exception values, used for
+ /// rethrows.
+ llvm::SmallVector<llvm::Value*, 8> ObjCEHValueStack;
+
+ // A struct holding information about a finally block's IR
+ // generation. For now, doesn't actually hold anything.
+ struct FinallyInfo {
+ };
+
+ FinallyInfo EnterFinallyBlock(const Stmt *Stmt,
+ llvm::Constant *BeginCatchFn,
+ llvm::Constant *EndCatchFn,
+ llvm::Constant *RethrowFn);
+ void ExitFinallyBlock(FinallyInfo &FinallyInfo);
+
+ /// PushDestructorCleanup - Push a cleanup to call the
+ /// complete-object destructor of an object of the given type at the
+ /// given address. Does nothing if T is not a C++ class type with a
+ /// non-trivial destructor.
+ void PushDestructorCleanup(QualType T, llvm::Value *Addr);
+
+ /// PushDestructorCleanup - Push a cleanup to call the
+ /// complete-object variant of the given destructor on the object at
+ /// the given address.
+ void PushDestructorCleanup(const CXXDestructorDecl *Dtor,
+ llvm::Value *Addr);
+
+ /// PopCleanupBlock - Will pop the cleanup entry on the stack and
+ /// process all branch fixups.
+ void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
+
+ void ActivateCleanup(EHScopeStack::stable_iterator Cleanup);
+
+ /// \brief Enters a new scope for capturing cleanups, all of which
+ /// will be executed once the scope is exited.
+ class RunCleanupsScope {
+ CodeGenFunction& CGF;
+ EHScopeStack::stable_iterator CleanupStackDepth;
+ bool OldDidCallStackSave;
+ bool PerformCleanup;
+
+ RunCleanupsScope(const RunCleanupsScope &); // DO NOT IMPLEMENT
+ RunCleanupsScope &operator=(const RunCleanupsScope &); // DO NOT IMPLEMENT
+
+ public:
+ /// \brief Enter a new cleanup scope.
+ explicit RunCleanupsScope(CodeGenFunction &CGF)
+ : CGF(CGF), PerformCleanup(true)
+ {
+ CleanupStackDepth = CGF.EHStack.stable_begin();
+ OldDidCallStackSave = CGF.DidCallStackSave;
+ }
+
+ /// \brief Exit this cleanup scope, emitting any accumulated
+ /// cleanups.
+ ~RunCleanupsScope() {
+ if (PerformCleanup) {
+ CGF.DidCallStackSave = OldDidCallStackSave;
+ CGF.PopCleanupBlocks(CleanupStackDepth);
+ }
+ }
+
+ /// \brief Determine whether this scope requires any cleanups.
+ bool requiresCleanups() const {
+ return CGF.EHStack.stable_begin() != CleanupStackDepth;
+ }
+
+ /// \brief Force the emission of cleanups now, instead of waiting
+ /// until this object is destroyed.
+ void ForceCleanup() {
+ assert(PerformCleanup && "Already forced cleanup");
+ CGF.DidCallStackSave = OldDidCallStackSave;
+ CGF.PopCleanupBlocks(CleanupStackDepth);
+ PerformCleanup = false;
+ }
+ };
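+
+ // Illustrative sketch (commentary only; 'S' is a hypothetical statement):
+ // any cleanups pushed while the scope object is live are emitted when it
+ // is destroyed:
+ //
+ //   {
+ //     RunCleanupsScope Scope(*this);
+ //     EmitStmt(S.getBody());        // may push cleanups onto EHStack
+ //   }                               // cleanups emitted here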
+
+
+ /// PopCleanupBlocks - Takes the old cleanup stack size and emits
+ /// the cleanup blocks that have been added.
+ void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
+
+ void ResolveAllBranchFixups(llvm::SwitchInst *Switch);
+ void ResolveBranchFixups(llvm::BasicBlock *Target);
+
+ /// The given basic block lies in the current EH scope, but may be a
+ /// target of a potentially scope-crossing jump; get a stable handle
+ /// to which we can perform this jump later.
+ JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
+ return JumpDest(Target,
+ EHStack.getInnermostNormalCleanup(),
+ NextCleanupDestIndex++);
+ }
+
+ /// The given basic block lies in the current EH scope, but may be a
+ /// target of a potentially scope-crossing jump; get a stable handle
+ /// to which we can perform this jump later.
+ JumpDest getJumpDestInCurrentScope(const char *Name = 0) {
+ return getJumpDestInCurrentScope(createBasicBlock(Name));
+ }
+
+ /// EmitBranchThroughCleanup - Emit a branch from the current insert
+ /// block through the normal cleanup handling code (if any) and then
+ /// on to \arg Dest.
+ void EmitBranchThroughCleanup(JumpDest Dest);
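+
+ // Illustrative sketch (commentary only, block name hypothetical): a 'break'
+ // inside a loop is emitted as a branch through any intervening cleanups to
+ // the JumpDest recorded for the loop exit:
+ //
+ //   JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
+ //   ...
+ //   EmitBranchThroughCleanup(LoopExit);   // on 'break'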
+
+ /// EmitBranchThroughEHCleanup - Emit a branch from the current
+ /// insert block through the EH cleanup handling code (if any) and
+ /// then on to \arg Dest.
+ void EmitBranchThroughEHCleanup(UnwindDest Dest);
+
+ /// getRethrowDest - Returns the unified outermost-scope rethrow
+ /// destination.
+ UnwindDest getRethrowDest();
+
+ /// BeginConditionalBranch - Should be called before a conditional part of an
+ /// expression is emitted. For example, before the RHS of the expression below
+ /// is emitted:
+ ///
+ /// b && f(T());
+ ///
+ /// This is used to make sure that any temporaries created in the conditional
+ /// branch are only destroyed if the branch is taken.
+ void BeginConditionalBranch() {
+ ++ConditionalBranchLevel;
+ }
+
+ /// EndConditionalBranch - Should be called after a conditional part of an
+ /// expression has been emitted.
+ void EndConditionalBranch() {
+ assert(ConditionalBranchLevel != 0 &&
+ "Conditional branch mismatch!");
+
+ --ConditionalBranchLevel;
+ }
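+
+ // Illustrative sketch (commentary only): when emitting the RHS of
+ // 'b && f(T())', the emission is bracketed so that the temporary T() is
+ // only destroyed if that branch is actually taken:
+ //
+ //   BeginConditionalBranch();
+ //   ... emit f(T()) and any conditional temporaries ...
+ //   EndConditionalBranch();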
+
+private:
+ CGDebugInfo *DebugInfo;
+
+ /// IndirectBranch - The first time an indirect goto is seen we create a block
+ /// with an indirect branch. Every time we see the address of a label taken,
+ /// we add the label to the indirect goto. Every subsequent indirect goto is
+ /// codegen'd as a jump to the IndirectBranch's basic block.
+ llvm::IndirectBrInst *IndirectBranch;
+
+ /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
+ /// decls.
+ llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
+
+ /// LabelMap - This keeps track of the LLVM basic block for each C label.
+ llvm::DenseMap<const LabelStmt*, JumpDest> LabelMap;
+
+ // BreakContinueStack - This keeps track of where break and continue
+ // statements should jump to.
+ struct BreakContinue {
+ BreakContinue(JumpDest Break, JumpDest Continue)
+ : BreakBlock(Break), ContinueBlock(Continue) {}
+
+ JumpDest BreakBlock;
+ JumpDest ContinueBlock;
+ };
+ llvm::SmallVector<BreakContinue, 8> BreakContinueStack;
+
+ /// SwitchInsn - This is the nearest enclosing switch instruction. It is null
+ /// if the current context is not in a switch.
+ llvm::SwitchInst *SwitchInsn;
+
+ /// CaseRangeBlock - This block holds the condition check for the last case
+ /// statement range in the current switch instruction.
+ llvm::BasicBlock *CaseRangeBlock;
+
+ // VLASizeMap - This keeps track of the associated size for each VLA type.
+ // We track this by the size expression rather than the type itself because
+ // in certain situations, like a const qualifier applied to a VLA typedef,
+ // multiple VLA types can share the same size expression.
+ // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
+ // enter/leave scopes.
+ llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
+
+ /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
+ /// calling llvm.stacksave for multiple VLAs in the same scope.
+ bool DidCallStackSave;
+
+ /// A block containing a single 'unreachable' instruction. Created
+ /// lazily by getUnreachableBlock().
+ llvm::BasicBlock *UnreachableBlock;
+
+ /// CXXThisDecl - When generating code for a C++ member function,
+ /// this will hold the implicit 'this' declaration.
+ ImplicitParamDecl *CXXThisDecl;
+ llvm::Value *CXXThisValue;
+
+ /// CXXVTTDecl - When generating code for a base object constructor or
+ /// base object destructor with virtual bases, this will hold the implicit
+ /// VTT parameter.
+ ImplicitParamDecl *CXXVTTDecl;
+ llvm::Value *CXXVTTValue;
+
+ /// ConditionalBranchLevel - Contains the nesting level of the current
+ /// conditional branch. This is used so that we know if a temporary should be
+ /// destroyed conditionally.
+ unsigned ConditionalBranchLevel;
+
+
+ /// ByRefValueInfo - For each __block variable, contains a pair of the LLVM
+ /// type and the field number that contains the actual data.
+ llvm::DenseMap<const ValueDecl *, std::pair<const llvm::Type *,
+ unsigned> > ByRefValueInfo;
+
+ /// getByRefValueLLVMField - Given a declaration, returns the LLVM field
+ /// number that holds the value.
+ unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
+
+ llvm::BasicBlock *TerminateLandingPad;
+ llvm::BasicBlock *TerminateHandler;
+ llvm::BasicBlock *TrapBB;
+
+public:
+ CodeGenFunction(CodeGenModule &cgm);
+
+ CodeGenTypes &getTypes() const { return CGM.getTypes(); }
+ ASTContext &getContext() const;
+ CGDebugInfo *getDebugInfo() { return DebugInfo; }
+
+ /// Returns a pointer to the function's exception object slot, which
+ /// is assigned in every landing pad.
+ llvm::Value *getExceptionSlot();
+
+ llvm::Value *getNormalCleanupDestSlot();
+ llvm::Value *getEHCleanupDestSlot();
+
+ llvm::BasicBlock *getUnreachableBlock() {
+ if (!UnreachableBlock) {
+ UnreachableBlock = createBasicBlock("unreachable");
+ new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
+ }
+ return UnreachableBlock;
+ }
+
+ llvm::BasicBlock *getInvokeDest() {
+ if (!EHStack.requiresLandingPad()) return 0;
+ return getInvokeDestImpl();
+ }
+
+ llvm::LLVMContext &getLLVMContext() { return VMContext; }
+
+ //===--------------------------------------------------------------------===//
+ // Objective-C
+ //===--------------------------------------------------------------------===//
+
+ void GenerateObjCMethod(const ObjCMethodDecl *OMD);
+
+ void StartObjCMethod(const ObjCMethodDecl *MD,
+ const ObjCContainerDecl *CD);
+
+ /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
+ void GenerateObjCGetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID);
+ void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
+ ObjCMethodDecl *MD, bool ctor);
+
+ /// GenerateObjCSetter - Synthesize an Objective-C property setter function
+ /// for the given property.
+ void GenerateObjCSetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID);
+ bool IndirectObjCSetterArg(const CGFunctionInfo &FI);
+ bool IvarTypeWithAggrGCObjects(QualType Ty);
+
+ //===--------------------------------------------------------------------===//
+ // Block Bits
+ //===--------------------------------------------------------------------===//
+
+ llvm::Value *BuildBlockLiteralTmp(const BlockExpr *);
+ llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
+ const CGBlockInfo &Info,
+ const llvm::StructType *,
+ llvm::Constant *BlockVarLayout,
+ std::vector<HelperInfo> *);
+
+ llvm::Function *GenerateBlockFunction(GlobalDecl GD,
+ const BlockExpr *BExpr,
+ CGBlockInfo &Info,
+ const Decl *OuterFuncDecl,
+ llvm::Constant *& BlockVarLayout,
+ llvm::DenseMap<const Decl*, llvm::Value*> ldm);
+
+ llvm::Value *LoadBlockStruct();
+
+ void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
+ void AllocateBlockDecl(const BlockDeclRefExpr *E);
+ llvm::Value *GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
+ return GetAddrOfBlockDecl(E->getDecl(), E->isByRef());
+ }
+ llvm::Value *GetAddrOfBlockDecl(const ValueDecl *D, bool ByRef);
+ const llvm::Type *BuildByRefType(const ValueDecl *D);
+
+ void GenerateCode(GlobalDecl GD, llvm::Function *Fn);
+ void StartFunction(GlobalDecl GD, QualType RetTy,
+ llvm::Function *Fn,
+ const FunctionArgList &Args,
+ SourceLocation StartLoc);
+
+ void EmitConstructorBody(FunctionArgList &Args);
+ void EmitDestructorBody(FunctionArgList &Args);
+ void EmitFunctionBody(FunctionArgList &Args);
+
+ /// EmitReturnBlock - Emit the unified return block, trying to avoid its
+ /// emission when possible.
+ void EmitReturnBlock();
+
+ /// FinishFunction - Complete IR generation of the current function. It is
+ /// legal to call this function even if there is no current insertion point.
+ void FinishFunction(SourceLocation EndLoc=SourceLocation());
+
+ /// GenerateThunk - Generate a thunk for the given method.
+ void GenerateThunk(llvm::Function *Fn, GlobalDecl GD, const ThunkInfo &Thunk);
+
+ void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
+ FunctionArgList &Args);
+
+ /// InitializeVTablePointer - Initialize the vtable pointer of the given
+ /// subobject.
+ ///
+ void InitializeVTablePointer(BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase,
+ uint64_t OffsetFromNearestVBase,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass);
+
+ typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
+ void InitializeVTablePointers(BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase,
+ uint64_t OffsetFromNearestVBase,
+ bool BaseIsNonVirtualPrimaryBase,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ VisitedVirtualBasesSetTy& VBases);
+
+ void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
+
+
+ /// EnterDtorCleanups - Enter the cleanups necessary to complete the
+ /// given phase of destruction for a destructor. The end result
+ /// should call destructors on members and base classes in reverse
+ /// order of their construction.
+ void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
+
+ /// ShouldInstrumentFunction - Return true if the current function should be
+ /// instrumented with __cyg_profile_func_* calls
+ bool ShouldInstrumentFunction();
+
+ /// EmitFunctionInstrumentation - Emit LLVM code to call the specified
+ /// instrumentation function with the current function and the call site, if
+ /// function instrumentation is enabled.
+ void EmitFunctionInstrumentation(const char *Fn);
+
+ /// EmitFunctionProlog - Emit the target specific LLVM code to load the
+ /// arguments for the given function. This is also responsible for naming the
+ /// LLVM function arguments.
+ void EmitFunctionProlog(const CGFunctionInfo &FI,
+ llvm::Function *Fn,
+ const FunctionArgList &Args);
+
+ /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
+ /// given temporary.
+ void EmitFunctionEpilog(const CGFunctionInfo &FI);
+
+ /// EmitStartEHSpec - Emit the start of the exception spec.
+ void EmitStartEHSpec(const Decl *D);
+
+ /// EmitEndEHSpec - Emit the end of the exception spec.
+ void EmitEndEHSpec(const Decl *D);
+
+ /// getTerminateLandingPad - Return a landing pad that just calls terminate.
+ llvm::BasicBlock *getTerminateLandingPad();
+
+ /// getTerminateHandler - Return a handler (not a landing pad, just
+ /// a catch handler) that just calls terminate. This is used when
+ /// a terminate scope encloses a try.
+ llvm::BasicBlock *getTerminateHandler();
+
+ const llvm::Type *ConvertTypeForMem(QualType T);
+ const llvm::Type *ConvertType(QualType T);
+ const llvm::Type *ConvertType(const TypeDecl *T) {
+ return ConvertType(getContext().getTypeDeclType(T));
+ }
+
+ /// LoadObjCSelf - Load the value of self. This function is only valid while
+ /// generating code for an Objective-C method.
+ llvm::Value *LoadObjCSelf();
+
+ /// TypeOfSelfObject - Return type of object that this self represents.
+ QualType TypeOfSelfObject();
+
+ /// hasAggregateLLVMType - Return true if the specified AST type will map into
+ /// an aggregate LLVM type or is void.
+ static bool hasAggregateLLVMType(QualType T);
+
+ /// createBasicBlock - Create an LLVM basic block.
+ llvm::BasicBlock *createBasicBlock(const char *Name="",
+ llvm::Function *Parent=0,
+ llvm::BasicBlock *InsertBefore=0) {
+#ifdef NDEBUG
+ return llvm::BasicBlock::Create(VMContext, "", Parent, InsertBefore);
+#else
+ return llvm::BasicBlock::Create(VMContext, Name, Parent, InsertBefore);
+#endif
+ }
+
+ /// getJumpDestForLabel - Return the JumpDest that the specified label
+ /// maps to.
+ JumpDest getJumpDestForLabel(const LabelStmt *S);
+
+ /// SimplifyForwardingBlocks - If the given basic block is only a branch to
+ /// another basic block, simplify it. This assumes that no other code could
+ /// potentially reference the basic block.
+ void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
+
+ /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
+ /// adding a fall-through branch from the current insert block if
+ /// necessary. It is legal to call this function even if there is no current
+ /// insertion point.
+ ///
+ /// IsFinished - If true, indicates that the caller has finished emitting
+ /// branches to the given block and does not expect to emit code into it. This
+ /// means the block can be ignored if it is unreachable.
+ void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
+
+ /// EmitBranch - Emit a branch to the specified basic block from the current
+ /// insert block, taking care to avoid creation of branches from dummy
+ /// blocks. It is legal to call this function even if there is no current
+ /// insertion point.
+ ///
+ /// This function clears the current insertion point. The caller should follow
+ /// calls to this function with calls to Emit*Block prior to generating new
+ /// code.
+ void EmitBranch(llvm::BasicBlock *Block);
+
+ /// HaveInsertPoint - True if an insertion point is defined. If not, this
+ /// indicates that the current code being emitted is unreachable.
+ bool HaveInsertPoint() const {
+ return Builder.GetInsertBlock() != 0;
+ }
+
+ /// EnsureInsertPoint - Ensure that an insertion point is defined so that
+ /// emitted IR has a place to go. Note that by definition, if this function
+ /// creates a block then that block is unreachable; callers may do better to
+ /// detect when no insertion point is defined and simply skip IR generation.
+ void EnsureInsertPoint() {
+ if (!HaveInsertPoint())
+ EmitBlock(createBasicBlock());
+ }
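+
+ // Illustrative sketch (commentary only, block name hypothetical): EmitBranch
+ // clears the insertion point, so emission typically continues by entering
+ // another block:
+ //
+ //   EmitBranch(CondBlock);   // terminates the current block, clears the IP
+ //   EmitBlock(CondBlock);    // makes CondBlock the new insertion point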
+
+ /// ErrorUnsupported - Print out an error that codegen doesn't support the
+ /// specified stmt yet.
+ void ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError=false);
+
+ //===--------------------------------------------------------------------===//
+ // Helpers
+ //===--------------------------------------------------------------------===//
+
+ LValue MakeAddrLValue(llvm::Value *V, QualType T, unsigned Alignment = 0) {
+ return LValue::MakeAddr(V, T, Alignment, getContext());
+ }
+
+ /// CreateTempAlloca - This creates an alloca and inserts it into the entry
+ /// block. The caller is responsible for setting an appropriate alignment on
+ /// the alloca.
+ llvm::AllocaInst *CreateTempAlloca(const llvm::Type *Ty,
+ const llvm::Twine &Name = "tmp");
+
+ /// InitTempAlloca - Provide an initial value for the given alloca.
+ void InitTempAlloca(llvm::AllocaInst *Alloca, llvm::Value *Value);
+
+ /// CreateIRTemp - Create a temporary IR object of the given type, with
+ /// appropriate alignment. This routine should only be used when a temporary
+ /// value needs to be stored into an alloca (for example, to avoid explicit
+ /// PHI construction), but the type is the IR type, not the type appropriate
+ /// for storing in memory.
+ llvm::AllocaInst *CreateIRTemp(QualType T, const llvm::Twine &Name = "tmp");
+
+ /// CreateMemTemp - Create a temporary memory object of the given type, with
+ /// appropriate alignment.
+ llvm::AllocaInst *CreateMemTemp(QualType T, const llvm::Twine &Name = "tmp");
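+
+ // Illustrative sketch (commentary only; 'E' is a hypothetical Expr*):
+ // materializing an aggregate expression into a fresh temporary:
+ //
+ //   llvm::Value *Tmp = CreateMemTemp(E->getType(), "agg.tmp");
+ //   EmitAggExpr(E, Tmp, E->getType().isVolatileQualified());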
+
+ /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
+ /// expression and compare the result against zero, returning an Int1Ty value.
+ llvm::Value *EvaluateExprAsBool(const Expr *E);
+
+ /// EmitAnyExpr - Emit code to compute the specified expression which can have
+ /// any type. The result is returned as an RValue struct. If this is an
+ /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
+ /// the result should be returned.
+ ///
+ /// \param IgnoreResult - True if the resulting value isn't used.
+ RValue EmitAnyExpr(const Expr *E, llvm::Value *AggLoc = 0,
+ bool IsAggLocVolatile = false, bool IgnoreResult = false,
+ bool IsInitializer = false);
+
+ // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
+ // or the value of the expression, depending on how va_list is defined.
+ llvm::Value *EmitVAListRef(const Expr *E);
+
+ /// EmitAnyExprToTemp - Similarly to EmitAnyExpr(); however, the result will
+ /// always be accessible even if no aggregate location is provided.
+ RValue EmitAnyExprToTemp(const Expr *E, bool IsAggLocVolatile = false,
+ bool IsInitializer = false);
+
+ /// EmitAnyExprToMem - Emits the code necessary to evaluate an
+ /// arbitrary expression into the given memory location.
+ void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
+ bool IsLocationVolatile = false,
+ bool IsInitializer = false);
+
+ /// EmitAggregateCopy - Emit an aggregate copy.
+ ///
+ /// \param isVolatile - True iff either the source or the destination is
+ /// volatile.
+ void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
+ QualType EltTy, bool isVolatile=false);
+
+ /// StartBlock - Start new block named N. If insert block is a dummy block
+ /// then reuse it.
+ void StartBlock(const char *N);
+
+ /// GetAddrOfStaticLocalVar - Return the address of a static local variable.
+ llvm::Constant *GetAddrOfStaticLocalVar(const VarDecl *BVD) {
+ return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
+ }
+
+ /// GetAddrOfLocalVar - Return the address of a local variable.
+ llvm::Value *GetAddrOfLocalVar(const VarDecl *VD) {
+ llvm::Value *Res = LocalDeclMap[VD];
+ assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
+ return Res;
+ }
+
+ /// getAccessedFieldNo - Given an encoded value and a result number, return
+ /// the input field number being accessed.
+ static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
+
+ llvm::BlockAddress *GetAddrOfLabel(const LabelStmt *L);
+ llvm::BasicBlock *GetIndirectGotoBlock();
+
+ /// EmitNullInitialization - Generate code to set a value of the given type to
+ /// null. If the type contains data member pointers, they will be initialized
+ /// to -1 in accordance with the Itanium C++ ABI.
+ void EmitNullInitialization(llvm::Value *DestPtr, QualType Ty);
+
+ // EmitVAArg - Generate code to get an argument from the passed in pointer
+ // and update it accordingly. The return value is a pointer to the argument.
+ // FIXME: We should be able to get rid of this method and use the va_arg
+ // instruction in LLVM instead once it works well enough.
+ llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);
+
+ /// EmitVLASize - Generate code for any VLA size expressions that might occur
+ /// in a variably modified type. If Ty is a VLA, will return the value that
+ /// corresponds to the size in bytes of the VLA type. Will return 0 otherwise.
+ ///
+ /// This function can be called with a null (unreachable) insert point.
+ llvm::Value *EmitVLASize(QualType Ty);
+
+ // GetVLASize - Returns an LLVM value that corresponds to the size in bytes
+ // of a variable length array type.
+ llvm::Value *GetVLASize(const VariableArrayType *);
+
+ /// LoadCXXThis - Load the value of 'this'. This function is only valid while
+ /// generating code for a C++ member function.
+ llvm::Value *LoadCXXThis() {
+ assert(CXXThisValue && "no 'this' value for this function");
+ return CXXThisValue;
+ }
+
+ /// LoadCXXVTT - Load the VTT parameter of base constructors/destructors
+ /// that have virtual bases.
+ llvm::Value *LoadCXXVTT() {
+ assert(CXXVTTValue && "no VTT value for this function");
+ return CXXVTTValue;
+ }
+
+ /// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
+ /// complete class to the given direct base.
+ llvm::Value *
+ GetAddressOfDirectBaseInCompleteClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Base,
+ bool BaseIsVirtual);
+
+ /// GetAddressOfBaseClass - This function will add the necessary delta to the
+ /// load of 'this' and returns address of the base class.
+ llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
+ bool NullCheckValue);
+
+ llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
+ bool NullCheckValue);
+
+ llvm::Value *GetVirtualBaseClassOffset(llvm::Value *This,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl);
+
+ void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
+ CXXCtorType CtorType,
+ const FunctionArgList &Args);
+ void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
+ bool ForVirtualBase, llvm::Value *This,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd);
+
+ void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
+ const ConstantArrayType *ArrayTy,
+ llvm::Value *ArrayPtr,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd,
+ bool ZeroInitialization = false);
+
+ void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
+ llvm::Value *NumElements,
+ llvm::Value *ArrayPtr,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd,
+ bool ZeroInitialization = false);
+
+ void EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
+ const ArrayType *Array,
+ llvm::Value *This);
+
+ void EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
+ llvm::Value *NumElements,
+ llvm::Value *This);
+
+ llvm::Function *GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
+ const ArrayType *Array,
+ llvm::Value *This);
+
+ void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
+ bool ForVirtualBase, llvm::Value *This);
+
+ void EmitNewArrayInitializer(const CXXNewExpr *E, llvm::Value *NewPtr,
+ llvm::Value *NumElements);
+
+ void EmitCXXTemporary(const CXXTemporary *Temporary, llvm::Value *Ptr);
+
+ llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
+ void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
+
+ void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
+ QualType DeleteTy);
+
+ llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
+ llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
+
+ void EmitCheck(llvm::Value *, unsigned Size);
+
+ llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre);
+ ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre);
+ //===--------------------------------------------------------------------===//
+ // Declaration Emission
+ //===--------------------------------------------------------------------===//
+
+ /// EmitDecl - Emit a declaration.
+ ///
+ /// This function can be called with a null (unreachable) insert point.
+ void EmitDecl(const Decl &D);
+
+ /// EmitBlockVarDecl - Emit a block variable declaration.
+ ///
+ /// This function can be called with a null (unreachable) insert point.
+ void EmitBlockVarDecl(const VarDecl &D);
+
+ typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
+ llvm::Value *Address);
+
+ /// EmitLocalBlockVarDecl - Emit a local block variable declaration.
+ ///
+ /// This function can be called with a null (unreachable) insert point.
+ void EmitLocalBlockVarDecl(const VarDecl &D, SpecialInitFn *SpecialInit = 0);
+
+ void EmitStaticBlockVarDecl(const VarDecl &D,
+ llvm::GlobalValue::LinkageTypes Linkage);
+
+ /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
+ void EmitParmDecl(const VarDecl &D, llvm::Value *Arg);
+
+ //===--------------------------------------------------------------------===//
+ // Statement Emission
+ //===--------------------------------------------------------------------===//
+
+ /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
+ void EmitStopPoint(const Stmt *S);
+
+ /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
+ /// this function even if there is no current insertion point.
+ ///
+ /// This function may clear the current insertion point; callers should use
+ /// EnsureInsertPoint if they wish to subsequently generate code without first
+ /// calling EmitBlock, EmitBranch, or EmitStmt.
+ void EmitStmt(const Stmt *S);
+
+ /// EmitSimpleStmt - Try to emit a "simple" statement which does not
+ /// necessarily require an insertion point or debug information; typically
+ /// because the statement amounts to a jump or a container of other
+ /// statements.
+ ///
+ /// \return True if the statement was handled.
+ bool EmitSimpleStmt(const Stmt *S);
+
+ RValue EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
+ llvm::Value *AggLoc = 0, bool isAggVol = false);
+
+ /// EmitLabel - Emit the block for the given label. It is legal to call this
+ /// function even if there is no current insertion point.
+ void EmitLabel(const LabelStmt &S); // helper for EmitLabelStmt.
+
+ void EmitLabelStmt(const LabelStmt &S);
+ void EmitGotoStmt(const GotoStmt &S);
+ void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
+ void EmitIfStmt(const IfStmt &S);
+ void EmitWhileStmt(const WhileStmt &S);
+ void EmitDoStmt(const DoStmt &S);
+ void EmitForStmt(const ForStmt &S);
+ void EmitReturnStmt(const ReturnStmt &S);
+ void EmitDeclStmt(const DeclStmt &S);
+ void EmitBreakStmt(const BreakStmt &S);
+ void EmitContinueStmt(const ContinueStmt &S);
+ void EmitSwitchStmt(const SwitchStmt &S);
+ void EmitDefaultStmt(const DefaultStmt &S);
+ void EmitCaseStmt(const CaseStmt &S);
+ void EmitCaseStmtRange(const CaseStmt &S);
+ void EmitAsmStmt(const AsmStmt &S);
+
+ void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
+ void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
+ void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
+ void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
+
+ llvm::Constant *getUnwindResumeOrRethrowFn();
+ void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
+ void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
+
+ void EmitCXXTryStmt(const CXXTryStmt &S);
+
+ //===--------------------------------------------------------------------===//
+ // LValue Expression Emission
+ //===--------------------------------------------------------------------===//
+
+ /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
+ RValue GetUndefRValue(QualType Ty);
+
+ /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
+ /// and issue an ErrorUnsupported style diagnostic (using the
+ /// provided Name).
+ RValue EmitUnsupportedRValue(const Expr *E,
+ const char *Name);
+
+ /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
+ /// an ErrorUnsupported style diagnostic (using the provided Name).
+ LValue EmitUnsupportedLValue(const Expr *E,
+ const char *Name);
+
+ /// EmitLValue - Emit code to compute a designator that specifies the location
+ /// of the expression.
+ ///
+ /// This can return one of two things: a simple address or a bitfield
+ /// reference. In either case, the LLVM Value* in the LValue structure is
+ /// guaranteed to be an LLVM pointer type.
+ ///
+ /// If this returns a bitfield reference, nothing about the pointee type of
+ /// the LLVM value is known: For example, it may not be a pointer to an
+ /// integer.
+ ///
+ /// If this returns a normal address, and if the lvalue's C type is fixed
+ /// size, this method guarantees that the returned pointer type will point to
+ /// an LLVM type of the same size as the lvalue's type. If the lvalue has a
+ /// variable length type, this is not possible.
+ ///
+ LValue EmitLValue(const Expr *E);
+
+ /// EmitCheckedLValue - Same as EmitLValue but additionally we generate
+ /// checking code to guard against undefined behavior. This is only
+ /// suitable when we know that the address will be used to access the
+ /// object.
+ LValue EmitCheckedLValue(const Expr *E);
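+
+ // Illustrative sketch (commentary only; 'E' is a hypothetical Expr*): a
+ // common pattern is to emit an lvalue and then load it as an rvalue:
+ //
+ //   LValue LV = EmitLValue(E);
+ //   RValue RV = EmitLoadOfLValue(LV, E->getType());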
+
+ /// EmitLoadOfScalar - Load a scalar value from an address, taking
+ /// care to appropriately convert from the memory representation to
+ /// the LLVM value representation.
+ llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
+ unsigned Alignment, QualType Ty);
+
+ /// EmitStoreOfScalar - Store a scalar value to an address, taking
+ /// care to appropriately convert from the LLVM value representation to
+ /// the memory representation.
+ void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
+ bool Volatile, unsigned Alignment, QualType Ty);
+
+ /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
+ /// this method emits the address of the lvalue, then loads the result as an
+ /// rvalue, returning the rvalue.
+ RValue EmitLoadOfLValue(LValue V, QualType LVType);
+ RValue EmitLoadOfExtVectorElementLValue(LValue V, QualType LVType);
+ RValue EmitLoadOfBitfieldLValue(LValue LV, QualType ExprType);
+ RValue EmitLoadOfPropertyRefLValue(LValue LV, QualType ExprType);
+ RValue EmitLoadOfKVCRefLValue(LValue LV, QualType ExprType);
+
+
+ /// EmitStoreThroughLValue - Store the specified rvalue into the specified
+ /// lvalue, where both are guaranteed to have the same type, and that type
+ /// is 'Ty'.
+ void EmitStoreThroughLValue(RValue Src, LValue Dst, QualType Ty);
+ void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst,
+ QualType Ty);
+ void EmitStoreThroughPropertyRefLValue(RValue Src, LValue Dst, QualType Ty);
+ void EmitStoreThroughKVCRefLValue(RValue Src, LValue Dst, QualType Ty);
+
+ /// EmitStoreThroughBitfieldLValue - Store Src into Dst with the same
+ /// constraints as EmitStoreThroughLValue.
+ ///
+ /// \param Result [out] - If non-null, this will be set to a Value* for the
+ /// bit-field contents after the store, appropriate for use as the result of
+ /// an assignment to the bit-field.
+ void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, QualType Ty,
+ llvm::Value **Result=0);
+
+ // Note: only available for agg return types
+ LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
+ LValue EmitCompoundAssignOperatorLValue(const CompoundAssignOperator *E);
+ // Note: only available for agg return types
+ LValue EmitCallExprLValue(const CallExpr *E);
+ // Note: only available for agg return types
+ LValue EmitVAArgExprLValue(const VAArgExpr *E);
+ LValue EmitDeclRefLValue(const DeclRefExpr *E);
+ LValue EmitStringLiteralLValue(const StringLiteral *E);
+ LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
+ LValue EmitPredefinedLValue(const PredefinedExpr *E);
+ LValue EmitUnaryOpLValue(const UnaryOperator *E);
+ LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E);
+ LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
+ LValue EmitMemberExpr(const MemberExpr *E);
+ LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
+ LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
+ LValue EmitConditionalOperatorLValue(const ConditionalOperator *E);
+ LValue EmitCastLValue(const CastExpr *E);
+ LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E);
+
+ llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+ LValue EmitLValueForAnonRecordField(llvm::Value* Base,
+ const FieldDecl* Field,
+ unsigned CVRQualifiers);
+ LValue EmitLValueForField(llvm::Value* Base, const FieldDecl* Field,
+ unsigned CVRQualifiers);
+
+ /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
+ /// if the Field is a reference, this will return the address of the reference
+ /// and not the address of the value stored in the reference.
+ LValue EmitLValueForFieldInitialization(llvm::Value* Base,
+ const FieldDecl* Field,
+ unsigned CVRQualifiers);
+
+ LValue EmitLValueForIvar(QualType ObjectTy,
+ llvm::Value* Base, const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+
+ LValue EmitLValueForBitfield(llvm::Value* Base, const FieldDecl* Field,
+ unsigned CVRQualifiers);
+
+ LValue EmitBlockDeclRefLValue(const BlockDeclRefExpr *E);
+
+ LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
+ LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
+ LValue EmitCXXExprWithTemporariesLValue(const CXXExprWithTemporaries *E);
+ LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
+
+ LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
+ LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
+ LValue EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E);
+ LValue EmitObjCKVCRefLValue(const ObjCImplicitSetterGetterRefExpr *E);
+ LValue EmitObjCSuperExprLValue(const ObjCSuperExpr *E);
+ LValue EmitStmtExprLValue(const StmtExpr *E);
+ LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
+ LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
+ void EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::ConstantInt *Init);
+ //===--------------------------------------------------------------------===//
+ // Scalar Expression Emission
+ //===--------------------------------------------------------------------===//
+
+ /// EmitCall - Generate a call of the given function, expecting the given
+ /// result type, and using the given argument list which specifies both the
+ /// LLVM arguments and the types they were derived from.
+ ///
+ /// \param TargetDecl - If given, the decl of the function in a direct call;
+ /// used to set attributes on the call (noreturn, etc.).
+ RValue EmitCall(const CGFunctionInfo &FnInfo,
+ llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ const CallArgList &Args,
+ const Decl *TargetDecl = 0,
+ llvm::Instruction **callOrInvoke = 0);
+
+ RValue EmitCall(QualType FnType, llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd,
+ const Decl *TargetDecl = 0);
+ RValue EmitCallExpr(const CallExpr *E,
+ ReturnValueSlot ReturnValue = ReturnValueSlot());
+
+ llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
+ llvm::Value * const *ArgBegin,
+ llvm::Value * const *ArgEnd,
+ const llvm::Twine &Name = "");
+
+ llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
+ const llvm::Type *Ty);
+ llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
+ llvm::Value *&This, const llvm::Type *Ty);
+
+ RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
+ llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ llvm::Value *This,
+ llvm::Value *VTT,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd);
+ RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
+ ReturnValueSlot ReturnValue);
+ RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
+ ReturnValueSlot ReturnValue);
+
+ RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD,
+ ReturnValueSlot ReturnValue);
+
+
+ RValue EmitBuiltinExpr(const FunctionDecl *FD,
+ unsigned BuiltinID, const CallExpr *E);
+
+ RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
+
+ /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
+ /// is unhandled by the current target.
+ llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
+ llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitNeonCall(llvm::Function *F,
+ llvm::SmallVectorImpl<llvm::Value*> &O,
+ const char *name, bool splat = false,
+ unsigned shift = 0, bool rightshift = false);
+ llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx,
+ bool widen = false);
+ llvm::Value *EmitNeonShiftVector(llvm::Value *V, const llvm::Type *Ty,
+ bool negateForRightShift);
+
+ llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
+ llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
+ llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
+ llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
+ RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
+ ReturnValueSlot Return = ReturnValueSlot());
+ RValue EmitObjCPropertyGet(const Expr *E,
+ ReturnValueSlot Return = ReturnValueSlot());
+ RValue EmitObjCSuperPropertyGet(const Expr *Exp, const Selector &S,
+ ReturnValueSlot Return = ReturnValueSlot());
+ void EmitObjCPropertySet(const Expr *E, RValue Src);
+ void EmitObjCSuperPropertySet(const Expr *E, const Selector &S, RValue Src);
+
+
+ /// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
+ /// expression. Will emit a temporary variable if E is not an LValue.
+ RValue EmitReferenceBindingToExpr(const Expr* E,
+ const NamedDecl *InitializedDecl);
+
+ //===--------------------------------------------------------------------===//
+ // Expression Emission
+ //===--------------------------------------------------------------------===//
+
+ // Expressions are broken into three classes: scalar, complex, aggregate.
+
+ /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
+ /// scalar type, returning the result.
+ llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
+
+ /// EmitScalarConversion - Emit a conversion from the specified type to the
+ /// specified destination type, both of which are LLVM scalar types.
+ llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
+ QualType DstTy);
+
+ /// EmitComplexToScalarConversion - Emit a conversion from the specified
+ /// complex type to the specified destination type, where the destination type
+ /// is an LLVM scalar type.
+ llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
+ QualType DstTy);
+
+
+ /// EmitAggExpr - Emit the computation of the specified expression of
+ /// aggregate type. The result is computed into DestPtr. Note that if
+ /// DestPtr is null, the value of the aggregate expression is not needed.
+ void EmitAggExpr(const Expr *E, llvm::Value *DestPtr, bool VolatileDest,
+ bool IgnoreResult = false, bool IsInitializer = false,
+ bool RequiresGCollection = false);
+
+ /// EmitAggExprToLValue - Emit the computation of the specified expression of
+ /// aggregate type into a temporary LValue.
+ LValue EmitAggExprToLValue(const Expr *E);
+
+ /// EmitGCMemmoveCollectable - Emit special API for structs with object
+ /// pointers.
+ void EmitGCMemmoveCollectable(llvm::Value *DestPtr, llvm::Value *SrcPtr,
+ QualType Ty);
+
+ /// EmitComplexExpr - Emit the computation of the specified expression of
+ /// complex type, returning the result.
+ ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal = false,
+ bool IgnoreImag = false,
+ bool IgnoreRealAssign = false,
+ bool IgnoreImagAssign = false);
+
+ /// EmitComplexExprIntoAddr - Emit the computation of the specified expression
+ /// of complex type, storing into the specified Value*.
+ void EmitComplexExprIntoAddr(const Expr *E, llvm::Value *DestAddr,
+ bool DestIsVolatile);
+
+ /// StoreComplexToAddr - Store a complex number into the specified address.
+ void StoreComplexToAddr(ComplexPairTy V, llvm::Value *DestAddr,
+ bool DestIsVolatile);
+ /// LoadComplexFromAddr - Load a complex number from the specified address.
+ ComplexPairTy LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile);
+
+ /// CreateStaticBlockVarDecl - Create a zero-initialized LLVM global for a
+ /// static block var decl.
+ llvm::GlobalVariable *CreateStaticBlockVarDecl(const VarDecl &D,
+ const char *Separator,
+ llvm::GlobalValue::LinkageTypes Linkage);
+
+ /// AddInitializerToGlobalBlockVarDecl - Add the initializer for 'D' to the
+ /// global variable that has already been created for it. If the initializer
+ /// has a different type than GV does, this may free GV and return a different
+ /// one. Otherwise it just returns GV.
+ llvm::GlobalVariable *
+ AddInitializerToGlobalBlockVarDecl(const VarDecl &D,
+ llvm::GlobalVariable *GV);
+
+
+ /// EmitStaticCXXBlockVarDeclInit - Create the initializer for a C++ runtime
+ /// initialized static block var decl.
+ void EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
+ llvm::GlobalVariable *GV);
+
+ /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
+ /// variable with global storage.
+ void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr);
+
+ /// EmitCXXGlobalDtorRegistration - Emits a call to register the global ptr
+ /// with the C++ runtime so that its destructor will be called at exit.
+ void EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
+ llvm::Constant *DeclPtr);
+
+ /// GenerateCXXGlobalInitFunc - Generates code for initializing global
+ /// variables.
+ void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
+ llvm::Constant **Decls,
+ unsigned NumDecls);
+
+ /// GenerateCXXGlobalDtorFunc - Generates code for destroying global
+ /// variables.
+ void GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
+ const std::vector<std::pair<llvm::WeakVH,
+ llvm::Constant*> > &DtorsAndObjects);
+
+ void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D);
+
+ void EmitCXXConstructExpr(llvm::Value *Dest, const CXXConstructExpr *E);
+
+ RValue EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
+ llvm::Value *AggLoc = 0,
+ bool IsAggLocVolatile = false,
+ bool IsInitializer = false);
+
+ void EmitCXXThrowExpr(const CXXThrowExpr *E);
+
+ //===--------------------------------------------------------------------===//
+ // Internal Helpers
+ //===--------------------------------------------------------------------===//
+
+ /// ContainsLabel - Return true if the statement contains a label. If this
+ /// statement is not executed normally, then not containing a label means
+ /// that we can just remove the code.
+ static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
+
+ /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
+ /// to a constant, or if it does but contains a label, return 0. If it
+ /// constant folds to 'true' and does not contain a label, return 1; if it
+ /// constant folds to 'false' and does not contain a label, return -1.
+ int ConstantFoldsToSimpleInteger(const Expr *Cond);
+
+ /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
+ /// if statement) to the specified blocks. Based on the condition, this might
+ /// try to simplify the codegen of the conditional based on the branch.
+ void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
+ llvm::BasicBlock *FalseBlock);
+
+ /// getTrapBB - Create a basic block that will call the trap intrinsic. We'll
+ /// generate a branch around the created basic block as necessary.
+ llvm::BasicBlock *getTrapBB();
+
+ /// EmitCallArg - Emit a single call argument.
+ RValue EmitCallArg(const Expr *E, QualType ArgType);
+
+ /// EmitDelegateCallArg - We are performing a delegate call; that
+ /// is, the current function is delegating to another one. Produce
+ /// an r-value suitable for passing the given parameter.
+ RValue EmitDelegateCallArg(const VarDecl *Param);
+
+private:
+ void EmitReturnOfRValue(RValue RV, QualType Ty);
+
+ /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
+ /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
+ ///
+ /// \param AI - The first function argument of the expansion.
+ /// \return The argument following the last expanded function
+ /// argument.
+ llvm::Function::arg_iterator
+ ExpandTypeFromArgs(QualType Ty, LValue Dst,
+ llvm::Function::arg_iterator AI);
+
+ /// ExpandTypeToArgs - Expand an RValue \arg Src, with the LLVM type for \arg
+ /// Ty, into individual arguments on the provided vector \arg Args. See
+ /// ABIArgInfo::Expand.
+ void ExpandTypeToArgs(QualType Ty, RValue Src,
+ llvm::SmallVector<llvm::Value*, 16> &Args);
+
+ llvm::Value* EmitAsmInput(const AsmStmt &S,
+ const TargetInfo::ConstraintInfo &Info,
+ const Expr *InputExpr, std::string &ConstraintStr);
+
+ llvm::Value* EmitAsmInputLValue(const AsmStmt &S,
+ const TargetInfo::ConstraintInfo &Info,
+ LValue InputValue, QualType InputType,
+ std::string &ConstraintStr);
+
+ /// EmitCallArgs - Emit call arguments for a function.
+ /// The CallArgTypeInfo parameter is used for iterating over the known
+ /// argument types of the function being called.
+ template<typename T>
+ void EmitCallArgs(CallArgList& Args, const T* CallArgTypeInfo,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ CallExpr::const_arg_iterator Arg = ArgBeg;
+
+ // First, use the argument types that the type info knows about
+ if (CallArgTypeInfo) {
+ for (typename T::arg_type_iterator I = CallArgTypeInfo->arg_type_begin(),
+ E = CallArgTypeInfo->arg_type_end(); I != E; ++I, ++Arg) {
+ assert(Arg != ArgEnd && "Running over edge of argument list!");
+ QualType ArgType = *I;
+
+ assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
+ getTypePtr() ==
+ getContext().getCanonicalType(Arg->getType()).getTypePtr() &&
+ "type mismatch in call argument!");
+
+ Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
+ ArgType));
+ }
+
+ // Either we've emitted all the call args, or we have a call to a
+ // variadic function.
+ assert((Arg == ArgEnd || CallArgTypeInfo->isVariadic()) &&
+ "Extra arguments in non-variadic function!");
+
+ }
+
+ // If we still have any arguments, emit them using the type of the argument.
+ for (; Arg != ArgEnd; ++Arg) {
+ QualType ArgType = Arg->getType();
+ Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
+ ArgType));
+ }
+ }
+
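+ // Illustrative sketch, not part of the original source: EmitCallArgs is
+ // normally driven by the callee's FunctionProtoType, which supplies the
+ // known parameter types; any remaining (variadic) arguments fall back to
+ // the expression's own type. A hypothetical caller might look like:
+ //
+ //   CallArgList Args;
+ //   const FunctionProtoType *FPT = ...; // callee's prototype, if known
+ //   EmitCallArgs(Args, FPT, CE->arg_begin(), CE->arg_end());
+ //
+ // Passing a null CallArgTypeInfo makes every argument use Arg->getType(),
+ // which is what happens for calls through a function with no prototype.
+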
+ const TargetCodeGenInfo &getTargetHooks() const {
+ return CGM.getTargetCodeGenInfo();
+ }
+
+ void EmitDeclMetadata();
+};
+
+/// CGBlockInfo - Information to generate a block literal.
+class CGBlockInfo {
+public:
+ /// Name - The name of the block, kind of.
+ const char *Name;
+
+ /// DeclRefs - Variables from parent scopes that have been
+ /// imported into this block.
+ llvm::SmallVector<const BlockDeclRefExpr *, 8> DeclRefs;
+
+ /// InnerBlocks - This block and the blocks it encloses.
+ llvm::SmallPtrSet<const DeclContext *, 4> InnerBlocks;
+
+ /// CXXThisRef - Non-null if 'this' was required somewhere, in
+ /// which case this is that expression.
+ const CXXThisExpr *CXXThisRef;
+
+ /// NeedsObjCSelf - True if something in this block has an implicit
+ /// reference to 'self'.
+ bool NeedsObjCSelf;
+
+ /// These are initialized by GenerateBlockFunction.
+ bool BlockHasCopyDispose;
+ CharUnits BlockSize;
+ CharUnits BlockAlign;
+ llvm::SmallVector<const Expr*, 8> BlockLayout;
+
+ CGBlockInfo(const char *Name);
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
new file mode 100644
index 0000000..d125b37
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
@@ -0,0 +1,2162 @@
+//===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the per-module state used while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CGCall.h"
+#include "CGCXXABI.h"
+#include "CGObjCRuntime.h"
+#include "Mangle.h"
+#include "TargetInfo.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/ConvertUTF.h"
+#include "llvm/CallingConv.h"
+#include "llvm/Module.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+using namespace CodeGen;
+
+static CGCXXABI &createCXXABI(CodeGenModule &CGM) {
+ switch (CGM.getContext().Target.getCXXABI()) {
+ case CXXABI_ARM: return *CreateARMCXXABI(CGM);
+ case CXXABI_Itanium: return *CreateItaniumCXXABI(CGM);
+ case CXXABI_Microsoft: return *CreateMicrosoftCXXABI(CGM);
+ }
+
+ llvm_unreachable("invalid C++ ABI kind");
+ return *CreateItaniumCXXABI(CGM);
+}
+
+
+CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
+ llvm::Module &M, const llvm::TargetData &TD,
+ Diagnostic &diags)
+ : BlockModule(C, M, TD, Types, *this), Context(C),
+ Features(C.getLangOptions()), CodeGenOpts(CGO), TheModule(M),
+ TheTargetData(TD), TheTargetCodeGenInfo(0), Diags(diags),
+ ABI(createCXXABI(*this)),
+ Types(C, M, TD, getTargetCodeGenInfo().getABIInfo(), ABI),
+ VTables(*this), Runtime(0),
+ CFConstantStringClassRef(0), NSConstantStringClassRef(0),
+ VMContext(M.getContext()),
+ NSConcreteGlobalBlockDecl(0), NSConcreteStackBlockDecl(0),
+ NSConcreteGlobalBlock(0), NSConcreteStackBlock(0),
+ BlockObjectAssignDecl(0), BlockObjectDisposeDecl(0),
+ BlockObjectAssign(0), BlockObjectDispose(0){
+
+ if (!Features.ObjC1)
+ Runtime = 0;
+ else if (!Features.NeXTRuntime)
+ Runtime = CreateGNUObjCRuntime(*this);
+ else if (Features.ObjCNonFragileABI)
+ Runtime = CreateMacNonFragileABIObjCRuntime(*this);
+ else
+ Runtime = CreateMacObjCRuntime(*this);
+
+ // If debug info generation is enabled, create the CGDebugInfo object.
+ DebugInfo = CodeGenOpts.DebugInfo ? new CGDebugInfo(*this) : 0;
+}
+
+CodeGenModule::~CodeGenModule() {
+ delete Runtime;
+ delete &ABI;
+ delete DebugInfo;
+}
+
+void CodeGenModule::createObjCRuntime() {
+ if (!Features.NeXTRuntime)
+ Runtime = CreateGNUObjCRuntime(*this);
+ else if (Features.ObjCNonFragileABI)
+ Runtime = CreateMacNonFragileABIObjCRuntime(*this);
+ else
+ Runtime = CreateMacObjCRuntime(*this);
+}
+
+void CodeGenModule::Release() {
+ EmitDeferred();
+ EmitCXXGlobalInitFunc();
+ EmitCXXGlobalDtorFunc();
+ if (Runtime)
+ if (llvm::Function *ObjCInitFunction = Runtime->ModuleInitFunction())
+ AddGlobalCtor(ObjCInitFunction);
+ EmitCtorList(GlobalCtors, "llvm.global_ctors");
+ EmitCtorList(GlobalDtors, "llvm.global_dtors");
+ EmitAnnotations();
+ EmitLLVMUsed();
+
+ if (getCodeGenOpts().EmitDeclMetadata)
+ EmitDeclMetadata();
+}
+
+bool CodeGenModule::isTargetDarwin() const {
+ return getContext().Target.getTriple().getOS() == llvm::Triple::Darwin;
+}
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified stmt yet.
+void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError) {
+ if (OmitOnError && getDiags().hasErrorOccurred())
+ return;
+ unsigned DiagID = getDiags().getCustomDiagID(Diagnostic::Error,
+ "cannot compile this %0 yet");
+ std::string Msg = Type;
+ getDiags().Report(Context.getFullLoc(S->getLocStart()), DiagID)
+ << Msg << S->getSourceRange();
+}
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified decl yet.
+void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type,
+ bool OmitOnError) {
+ if (OmitOnError && getDiags().hasErrorOccurred())
+ return;
+ unsigned DiagID = getDiags().getCustomDiagID(Diagnostic::Error,
+ "cannot compile this %0 yet");
+ std::string Msg = Type;
+ getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
+}
+
+LangOptions::VisibilityMode
+CodeGenModule::getDeclVisibilityMode(const Decl *D) const {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ if (VD->getStorageClass() == SC_PrivateExtern)
+ return LangOptions::Hidden;
+
+ if (const VisibilityAttr *attr = D->getAttr<VisibilityAttr>()) {
+ switch (attr->getVisibility()) {
+ default: assert(0 && "Unknown visibility!");
+ case VisibilityAttr::Default:
+ return LangOptions::Default;
+ case VisibilityAttr::Hidden:
+ return LangOptions::Hidden;
+ case VisibilityAttr::Protected:
+ return LangOptions::Protected;
+ }
+ }
+
+ if (getLangOptions().CPlusPlus) {
+ // Entities subject to an explicit instantiation declaration get default
+ // visibility.
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
+ if (Function->getTemplateSpecializationKind()
+ == TSK_ExplicitInstantiationDeclaration)
+ return LangOptions::Default;
+ } else if (const ClassTemplateSpecializationDecl *ClassSpec
+ = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
+ if (ClassSpec->getSpecializationKind()
+ == TSK_ExplicitInstantiationDeclaration)
+ return LangOptions::Default;
+ } else if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) {
+ if (Record->getTemplateSpecializationKind()
+ == TSK_ExplicitInstantiationDeclaration)
+ return LangOptions::Default;
+ } else if (const VarDecl *Var = dyn_cast<VarDecl>(D)) {
+ if (Var->isStaticDataMember() &&
+ (Var->getTemplateSpecializationKind()
+ == TSK_ExplicitInstantiationDeclaration))
+ return LangOptions::Default;
+ }
+
+ // If -fvisibility-inlines-hidden was provided, then inline C++ member
+ // functions get "hidden" visibility by default.
+ if (getLangOptions().InlineVisibilityHidden)
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
+ if (Method->isInlined())
+ return LangOptions::Hidden;
+ }
+
+ // If this decl is contained in a class, it should have the same visibility
+ // as the parent class.
+ if (const DeclContext *DC = D->getDeclContext())
+ if (DC->isRecord())
+ return getDeclVisibilityMode(cast<Decl>(DC));
+
+ return getLangOptions().getVisibilityMode();
+}
+
+void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
+ const Decl *D) const {
+ // Internal definitions always have default visibility.
+ if (GV->hasLocalLinkage()) {
+ GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+ return;
+ }
+
+ switch (getDeclVisibilityMode(D)) {
+ default: assert(0 && "Unknown visibility!");
+ case LangOptions::Default:
+ return GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+ case LangOptions::Hidden:
+ return GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ case LangOptions::Protected:
+ return GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
+ }
+}
+
+/// Set the symbol visibility of type information (vtable and RTTI)
+/// associated with the given type.
+void CodeGenModule::setTypeVisibility(llvm::GlobalValue *GV,
+ const CXXRecordDecl *RD,
+ bool IsForRTTI) const {
+ setGlobalVisibility(GV, RD);
+
+ if (!CodeGenOpts.HiddenWeakVTables)
+ return;
+
+ // We want to drop the visibility to hidden for weak type symbols.
+ // This isn't possible if there might be unresolved references
+ // elsewhere that rely on this symbol being visible.
+
+ // This should be kept roughly in sync with setThunkVisibility
+ // in CGVTables.cpp.
+
+ // Preconditions.
+ if (GV->getLinkage() != llvm::GlobalVariable::WeakODRLinkage ||
+ GV->getVisibility() != llvm::GlobalVariable::DefaultVisibility)
+ return;
+
+ // Don't override an explicit visibility attribute.
+ if (RD->hasAttr<VisibilityAttr>())
+ return;
+
+ switch (RD->getTemplateSpecializationKind()) {
+ // We have to disable the optimization if this is an EI definition
+ // because there might be EI declarations in other shared objects.
+ case TSK_ExplicitInstantiationDefinition:
+ case TSK_ExplicitInstantiationDeclaration:
+ return;
+
+ // Every use of a non-template class's type information has to emit it.
+ case TSK_Undeclared:
+ break;
+
+ // In theory, implicit instantiations can ignore the possibility of
+ // an explicit instantiation declaration because there necessarily
+ // must be an EI definition somewhere with default visibility. In
+ // practice, it's possible to have an explicit instantiation for
+ // an arbitrary template class, and linkers aren't necessarily able
+ // to deal with mixed-visibility symbols.
+ case TSK_ExplicitSpecialization:
+ case TSK_ImplicitInstantiation:
+ if (!CodeGenOpts.HiddenWeakTemplateVTables)
+ return;
+ break;
+ }
+
+ // If there's a key function, there may be translation units
+ // that don't have the key function's definition. But ignore
+ // this if we're emitting RTTI under -fno-rtti.
+ if (!IsForRTTI || Features.RTTI)
+ if (Context.getKeyFunction(RD))
+ return;
+
+ // Otherwise, drop the visibility to hidden.
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+}
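+
+// Illustrative note, not part of the original source: the explicit
+// instantiation guard above matters for code such as
+//
+//   extern template class Matrix<int>;  // hypothetical EI declaration
+//
+// where other translation units (possibly in other shared objects) may
+// reference this type information and rely on it being exported, so the
+// hidden-visibility optimization must be skipped for explicit instantiations.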
+
+llvm::StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
+ const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
+
+ llvm::StringRef &Str = MangledDeclNames[GD.getCanonicalDecl()];
+ if (!Str.empty())
+ return Str;
+
+ if (!getCXXABI().getMangleContext().shouldMangleDeclName(ND)) {
+ IdentifierInfo *II = ND->getIdentifier();
+ assert(II && "Attempt to mangle unnamed decl.");
+
+ Str = II->getName();
+ return Str;
+ }
+
+ llvm::SmallString<256> Buffer;
+ if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND))
+ getCXXABI().getMangleContext().mangleCXXCtor(D, GD.getCtorType(), Buffer);
+ else if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND))
+ getCXXABI().getMangleContext().mangleCXXDtor(D, GD.getDtorType(), Buffer);
+ else if (const BlockDecl *BD = dyn_cast<BlockDecl>(ND))
+ getCXXABI().getMangleContext().mangleBlock(GD, BD, Buffer);
+ else
+ getCXXABI().getMangleContext().mangleName(ND, Buffer);
+
+ // Allocate space for the mangled name.
+ size_t Length = Buffer.size();
+ char *Name = MangledNamesAllocator.Allocate<char>(Length);
+ std::copy(Buffer.begin(), Buffer.end(), Name);
+
+ Str = llvm::StringRef(Name, Length);
+
+ return Str;
+}
+
+void CodeGenModule::getMangledName(GlobalDecl GD, MangleBuffer &Buffer,
+ const BlockDecl *BD) {
+ getCXXABI().getMangleContext().mangleBlock(GD, BD, Buffer.getBuffer());
+}
+
+llvm::GlobalValue *CodeGenModule::GetGlobalValue(llvm::StringRef Name) {
+ return getModule().getNamedValue(Name);
+}
+
+/// AddGlobalCtor - Add a function to the list that will be called before
+/// main() runs.
+void CodeGenModule::AddGlobalCtor(llvm::Function * Ctor, int Priority) {
+ // FIXME: Type coercion of void()* types.
+ GlobalCtors.push_back(std::make_pair(Ctor, Priority));
+}
+
+/// AddGlobalDtor - Add a function to the list that will be called
+/// when the module is unloaded.
+void CodeGenModule::AddGlobalDtor(llvm::Function * Dtor, int Priority) {
+ // FIXME: Type coercion of void()* types.
+ GlobalDtors.push_back(std::make_pair(Dtor, Priority));
+}
+
+void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
+ // Ctor function type is void()*.
+ llvm::FunctionType* CtorFTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ std::vector<const llvm::Type*>(),
+ false);
+ llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy);
+
+ // Get the type of a ctor entry, { i32, void ()* }.
+ llvm::StructType* CtorStructTy =
+ llvm::StructType::get(VMContext, llvm::Type::getInt32Ty(VMContext),
+ llvm::PointerType::getUnqual(CtorFTy), NULL);
+
+ // Construct the constructor and destructor arrays.
+ std::vector<llvm::Constant*> Ctors;
+ for (CtorList::const_iterator I = Fns.begin(), E = Fns.end(); I != E; ++I) {
+ std::vector<llvm::Constant*> S;
+ S.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ I->second, false));
+ S.push_back(llvm::ConstantExpr::getBitCast(I->first, CtorPFTy));
+ Ctors.push_back(llvm::ConstantStruct::get(CtorStructTy, S));
+ }
+
+ if (!Ctors.empty()) {
+ llvm::ArrayType *AT = llvm::ArrayType::get(CtorStructTy, Ctors.size());
+ new llvm::GlobalVariable(TheModule, AT, false,
+ llvm::GlobalValue::AppendingLinkage,
+ llvm::ConstantArray::get(AT, Ctors),
+ GlobalName);
+ }
+}
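+
+// Illustrative note, not part of the original source: with two registered
+// constructors, the code above produces an appending-linkage array of
+// { priority, function pointer } entries, roughly:
+//
+//   @llvm.global_ctors = appending global [2 x { i32, void ()* }]
+//     [{ i32, void ()* } { i32 65535, void ()* @init_a },
+//      { i32, void ()* } { i32 65535, void ()* @init_b }]
+//
+// The function names and priority values here are made up for illustration.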
+
+void CodeGenModule::EmitAnnotations() {
+ if (Annotations.empty())
+ return;
+
+ // Create a new global variable for the ConstantStruct in the Module.
+ llvm::Constant *Array =
+ llvm::ConstantArray::get(llvm::ArrayType::get(Annotations[0]->getType(),
+ Annotations.size()),
+ Annotations);
+ llvm::GlobalValue *gv =
+ new llvm::GlobalVariable(TheModule, Array->getType(), false,
+ llvm::GlobalValue::AppendingLinkage, Array,
+ "llvm.global.annotations");
+ gv->setSection("llvm.metadata");
+}
+
+llvm::GlobalValue::LinkageTypes
+CodeGenModule::getFunctionLinkage(const FunctionDecl *D) {
+ GVALinkage Linkage = getContext().GetGVALinkageForFunction(D);
+
+ if (Linkage == GVA_Internal)
+ return llvm::Function::InternalLinkage;
+
+ if (D->hasAttr<DLLExportAttr>())
+ return llvm::Function::DLLExportLinkage;
+
+ if (D->hasAttr<WeakAttr>())
+ return llvm::Function::WeakAnyLinkage;
+
+ // In C99 mode, 'inline' functions are guaranteed to have a strong
+ // definition somewhere else, so we can use available_externally linkage.
+ if (Linkage == GVA_C99Inline)
+ return llvm::Function::AvailableExternallyLinkage;
+
+ // In C++, the compiler has to emit a definition in every translation unit
+ // that references the function. We should use linkonce_odr because
+ // a) if all references in this translation unit are optimized away, we
+ // don't need to codegen it. b) if the function persists, it needs to be
+ // merged with other definitions. c) C++ has the ODR, so we know the
+ // definition is dependable.
+ if (Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation)
+ return llvm::Function::LinkOnceODRLinkage;
+
+ // An explicit instantiation of a template has weak linkage, since
+ // explicit instantiations can occur in multiple translation units
+ // and must all be equivalent. However, we are not allowed to
+ // throw away these explicit instantiations.
+ if (Linkage == GVA_ExplicitTemplateInstantiation)
+ return llvm::Function::WeakODRLinkage;
+
+ // Otherwise, we have strong external linkage.
+ assert(Linkage == GVA_StrongExternal);
+ return llvm::Function::ExternalLinkage;
+}
+
+
+/// SetFunctionDefinitionAttributes - Set attributes for a global.
+///
+/// FIXME: This is currently only done for aliases and functions, but not for
+/// variables (these details are set in EmitGlobalVarDefinition for variables).
+void CodeGenModule::SetFunctionDefinitionAttributes(const FunctionDecl *D,
+ llvm::GlobalValue *GV) {
+ SetCommonAttributes(D, GV);
+}
+
+void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
+ const CGFunctionInfo &Info,
+ llvm::Function *F) {
+ unsigned CallingConv;
+ AttributeListType AttributeList;
+ ConstructAttributeList(Info, D, AttributeList, CallingConv);
+ F->setAttributes(llvm::AttrListPtr::get(AttributeList.begin(),
+ AttributeList.size()));
+ F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
+}
+
+void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
+ llvm::Function *F) {
+ if (!Features.Exceptions && !Features.ObjCNonFragileABI)
+ F->addFnAttr(llvm::Attribute::NoUnwind);
+
+ if (D->hasAttr<AlwaysInlineAttr>())
+ F->addFnAttr(llvm::Attribute::AlwaysInline);
+
+ if (D->hasAttr<NoInlineAttr>())
+ F->addFnAttr(llvm::Attribute::NoInline);
+
+ if (Features.getStackProtectorMode() == LangOptions::SSPOn)
+ F->addFnAttr(llvm::Attribute::StackProtect);
+ else if (Features.getStackProtectorMode() == LangOptions::SSPReq)
+ F->addFnAttr(llvm::Attribute::StackProtectReq);
+
+ unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
+ if (alignment)
+ F->setAlignment(alignment);
+
+ // C++ ABI requires 2-byte alignment for member functions.
+ if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D))
+ F->setAlignment(2);
+}
+
+void CodeGenModule::SetCommonAttributes(const Decl *D,
+ llvm::GlobalValue *GV) {
+ setGlobalVisibility(GV, D);
+
+ if (D->hasAttr<UsedAttr>())
+ AddUsedGlobal(GV);
+
+ if (const SectionAttr *SA = D->getAttr<SectionAttr>())
+ GV->setSection(SA->getName());
+
+ getTargetCodeGenInfo().SetTargetAttributes(D, GV, *this);
+}
+
+void CodeGenModule::SetInternalFunctionAttributes(const Decl *D,
+ llvm::Function *F,
+ const CGFunctionInfo &FI) {
+ SetLLVMFunctionAttributes(D, FI, F);
+ SetLLVMFunctionAttributesForDefinition(D, F);
+
+ F->setLinkage(llvm::Function::InternalLinkage);
+
+ SetCommonAttributes(D, F);
+}
+
+void CodeGenModule::SetFunctionAttributes(GlobalDecl GD,
+ llvm::Function *F,
+ bool IsIncompleteFunction) {
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+
+ if (!IsIncompleteFunction)
+ SetLLVMFunctionAttributes(FD, getTypes().getFunctionInfo(GD), F);
+
+ // Only a few attributes are set on declarations; these may later be
+ // overridden by a definition.
+
+ if (FD->hasAttr<DLLImportAttr>()) {
+ F->setLinkage(llvm::Function::DLLImportLinkage);
+ } else if (FD->hasAttr<WeakAttr>() ||
+ FD->hasAttr<WeakImportAttr>()) {
+ // "extern_weak" is overloaded in LLVM; we probably should have
+ // separate linkage types for this.
+ F->setLinkage(llvm::Function::ExternalWeakLinkage);
+ } else {
+ F->setLinkage(llvm::Function::ExternalLinkage);
+ }
+
+ if (const SectionAttr *SA = FD->getAttr<SectionAttr>())
+ F->setSection(SA->getName());
+}
+
+void CodeGenModule::AddUsedGlobal(llvm::GlobalValue *GV) {
+ assert(!GV->isDeclaration() &&
+ "Only globals with definition can force usage.");
+ LLVMUsed.push_back(GV);
+}
+
+void CodeGenModule::EmitLLVMUsed() {
+ // Don't create llvm.used if there is no need.
+ if (LLVMUsed.empty())
+ return;
+
+ const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
+
+ // Convert LLVMUsed to what ConstantArray needs.
+ std::vector<llvm::Constant*> UsedArray;
+ UsedArray.resize(LLVMUsed.size());
+ for (unsigned i = 0, e = LLVMUsed.size(); i != e; ++i) {
+ UsedArray[i] =
+ llvm::ConstantExpr::getBitCast(cast<llvm::Constant>(&*LLVMUsed[i]),
+ i8PTy);
+ }
+
+ if (UsedArray.empty())
+ return;
+ llvm::ArrayType *ATy = llvm::ArrayType::get(i8PTy, UsedArray.size());
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), ATy, false,
+ llvm::GlobalValue::AppendingLinkage,
+ llvm::ConstantArray::get(ATy, UsedArray),
+ "llvm.used");
+
+ GV->setSection("llvm.metadata");
+}
+
+void CodeGenModule::EmitDeferred() {
+ // Emit code for any potentially referenced deferred decls. Since a
+ // previously unused static decl may become used during the generation of code
+ // for a static function, iterate until no changes are made.
+
+ while (!DeferredDeclsToEmit.empty() || !DeferredVTables.empty()) {
+ if (!DeferredVTables.empty()) {
+ const CXXRecordDecl *RD = DeferredVTables.back();
+ DeferredVTables.pop_back();
+ getVTables().GenerateClassData(getVTableLinkage(RD), RD);
+ continue;
+ }
+
+ GlobalDecl D = DeferredDeclsToEmit.back();
+ DeferredDeclsToEmit.pop_back();
+
+ // Check to see if we've already emitted this. This is necessary
+ // for a couple of reasons: first, decls can end up in the
+ // deferred-decls queue multiple times, and second, decls can end
+ // up with definitions in unusual ways (e.g. by an extern inline
+ // function acquiring a strong function redefinition). Just
+ // ignore these cases.
+ //
+ // TODO: That said, looking this up multiple times is very wasteful.
+ llvm::StringRef Name = getMangledName(D);
+ llvm::GlobalValue *CGRef = GetGlobalValue(Name);
+ assert(CGRef && "Deferred decl wasn't referenced?");
+
+ if (!CGRef->isDeclaration())
+ continue;
+
+ // GlobalAlias::isDeclaration() defers to the aliasee, but for our
+ // purposes an alias counts as a definition.
+ if (isa<llvm::GlobalAlias>(CGRef))
+ continue;
+
+ // Otherwise, emit the definition and move on to the next one.
+ EmitGlobalDefinition(D);
+ }
+}
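+
+// Illustrative note, not part of the original source: the loop above is a
+// worklist because emitting one deferred definition can make another one
+// referenced for the first time. For example, given
+//
+//   static int helper(void) { return 1; }
+//   static int user(void)   { return helper(); }
+//
+// if only 'user' ends up referenced, generating its body is what first
+// references 'helper', which is then pulled off the deferred lists on a
+// later iteration.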
+
+/// EmitAnnotateAttr - Generate the llvm::ConstantStruct which contains the
+/// annotation information for a given GlobalValue. The annotation struct is
+/// {i8 *, i8 *, i8 *, i32}. The first field is a constant expression, the
+/// GlobalValue being annotated. The second field is the constant string
+/// created from the AnnotateAttr's annotation. The third field is a constant
+/// string containing the name of the translation unit. The fourth field is
+/// the line number in the file of the annotated value declaration.
+///
+/// FIXME: this does not unique the annotation string constants, as llvm-gcc
+/// appears to.
+///
+llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
+ const AnnotateAttr *AA,
+ unsigned LineNo) {
+ llvm::Module *M = &getModule();
+
+ // get [N x i8] constants for the annotation string, and the filename string
+ // which are the 2nd and 3rd elements of the global annotation structure.
+ const llvm::Type *SBP = llvm::Type::getInt8PtrTy(VMContext);
+ llvm::Constant *anno = llvm::ConstantArray::get(VMContext,
+ AA->getAnnotation(), true);
+ llvm::Constant *unit = llvm::ConstantArray::get(VMContext,
+ M->getModuleIdentifier(),
+ true);
+
+ // Get the two global values corresponding to the ConstantArrays we just
+ // created to hold the bytes of the strings.
+ llvm::GlobalValue *annoGV =
+ new llvm::GlobalVariable(*M, anno->getType(), false,
+ llvm::GlobalValue::PrivateLinkage, anno,
+ GV->getName());
+ // translation unit name string, emitted into the llvm.metadata section.
+ llvm::GlobalValue *unitGV =
+ new llvm::GlobalVariable(*M, unit->getType(), false,
+ llvm::GlobalValue::PrivateLinkage, unit,
+ ".str");
+
+ // Create the ConstantStruct for the global annotation.
+ llvm::Constant *Fields[4] = {
+ llvm::ConstantExpr::getBitCast(GV, SBP),
+ llvm::ConstantExpr::getBitCast(annoGV, SBP),
+ llvm::ConstantExpr::getBitCast(unitGV, SBP),
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), LineNo)
+ };
+ return llvm::ConstantStruct::get(VMContext, Fields, 4, false);
+}
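+
+// Illustrative note, not part of the original source: for a declaration such
+// as
+//
+//   int g __attribute__((annotate("counter")));
+//
+// the struct built above holds (1) 'g' bitcast to i8*, (2) a pointer to the
+// "counter" string, (3) a pointer to the module identifier string, and
+// (4) the declaration's line number, matching the { i8*, i8*, i8*, i32 }
+// layout described in the comment above.
+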
+
+bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
+ // Never defer when EmitAllDecls is specified.
+ if (Features.EmitAllDecls)
+ return false;
+
+ return !getContext().DeclMustBeEmitted(Global);
+}
+
+llvm::Constant *CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
+ const AliasAttr *AA = VD->getAttr<AliasAttr>();
+ assert(AA && "No alias?");
+
+ const llvm::Type *DeclTy = getTypes().ConvertTypeForMem(VD->getType());
+
+ // See if there is already something with the target's name in the module.
+ llvm::GlobalValue *Entry = GetGlobalValue(AA->getAliasee());
+
+ llvm::Constant *Aliasee;
+ if (isa<llvm::FunctionType>(DeclTy))
+ Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GlobalDecl());
+ else
+ Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
+ llvm::PointerType::getUnqual(DeclTy), 0);
+ if (!Entry) {
+ llvm::GlobalValue* F = cast<llvm::GlobalValue>(Aliasee);
+ F->setLinkage(llvm::Function::ExternalWeakLinkage);
+ WeakRefReferences.insert(F);
+ }
+
+ return Aliasee;
+}
+
+void CodeGenModule::EmitGlobal(GlobalDecl GD) {
+ const ValueDecl *Global = cast<ValueDecl>(GD.getDecl());
+
+ // Weak references don't produce any output by themselves.
+ if (Global->hasAttr<WeakRefAttr>())
+ return;
+
+ // If this is an alias definition (which otherwise looks like a declaration)
+ // emit it now.
+ if (Global->hasAttr<AliasAttr>())
+ return EmitAliasDefinition(GD);
+
+ // Ignore declarations, they will be emitted on their first use.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Global)) {
+ if (FD->getIdentifier()) {
+ llvm::StringRef Name = FD->getName();
+ if (Name == "_Block_object_assign") {
+ BlockObjectAssignDecl = FD;
+ } else if (Name == "_Block_object_dispose") {
+ BlockObjectDisposeDecl = FD;
+ }
+ }
+
+ // Forward declarations are emitted lazily on first use.
+ if (!FD->isThisDeclarationADefinition())
+ return;
+ } else {
+ const VarDecl *VD = cast<VarDecl>(Global);
+ assert(VD->isFileVarDecl() && "Cannot emit local var decl as global.");
+
+ if (VD->getIdentifier()) {
+ llvm::StringRef Name = VD->getName();
+ if (Name == "_NSConcreteGlobalBlock") {
+ NSConcreteGlobalBlockDecl = VD;
+ } else if (Name == "_NSConcreteStackBlock") {
+ NSConcreteStackBlockDecl = VD;
+ }
+ }
+
+
+ if (VD->isThisDeclarationADefinition() != VarDecl::Definition)
+ return;
+ }
+
+ // Defer code generation when possible if this is a static definition, inline
+ // function etc. These we only want to emit if they are used.
+ if (!MayDeferGeneration(Global)) {
+ // Emit the definition if it can't be deferred.
+ EmitGlobalDefinition(GD);
+ return;
+ }
+
+ // If we're deferring emission of a C++ variable with an
+ // initializer, remember the order in which it appeared in the file.
+ if (getLangOptions().CPlusPlus && isa<VarDecl>(Global) &&
+ cast<VarDecl>(Global)->hasInit()) {
+ DelayedCXXInitPosition[Global] = CXXGlobalInits.size();
+ CXXGlobalInits.push_back(0);
+ }
+
+ // If the value has already been used, add it directly to the
+ // DeferredDeclsToEmit list.
+ llvm::StringRef MangledName = getMangledName(GD);
+ if (GetGlobalValue(MangledName))
+ DeferredDeclsToEmit.push_back(GD);
+ else {
+ // Otherwise, remember that we saw a deferred decl with this name. The
+ // first use of the mangled name will cause it to move into
+ // DeferredDeclsToEmit.
+ DeferredDecls[MangledName] = GD;
+ }
+}
+
+void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
+ const ValueDecl *D = cast<ValueDecl>(GD.getDecl());
+
+ PrettyStackTraceDecl CrashInfo(const_cast<ValueDecl *>(D), D->getLocation(),
+ Context.getSourceManager(),
+ "Generating code for declaration");
+
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
+ // At -O0, don't generate IR for functions with available_externally
+ // linkage.
+ if (CodeGenOpts.OptimizationLevel == 0 &&
+ !Function->hasAttr<AlwaysInlineAttr>() &&
+ getFunctionLinkage(Function)
+ == llvm::Function::AvailableExternallyLinkage)
+ return;
+
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (Method->isVirtual())
+ getVTables().EmitThunks(GD);
+
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(Method))
+ return EmitCXXConstructor(CD, GD.getCtorType());
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(Method))
+ return EmitCXXDestructor(DD, GD.getDtorType());
+ }
+
+ return EmitGlobalFunctionDefinition(GD);
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ return EmitGlobalVarDefinition(VD);
+
+ assert(0 && "Invalid argument to EmitGlobalDefinition()");
+}
+
+/// GetOrCreateLLVMFunction - If the specified mangled name is not in the
+/// module, create and return an llvm Function with the specified type. If there
+/// is something in the module with the specified name, return it potentially
+/// bitcasted to the right type.
+///
+/// If D is non-null, it specifies a decl that corresponds to this. This is used
+/// to set the attributes on the function when it is first created.
+llvm::Constant *
+CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
+ const llvm::Type *Ty,
+ GlobalDecl D) {
+ // Lookup the entry, lazily creating it if necessary.
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (Entry) {
+ if (WeakRefReferences.count(Entry)) {
+ const FunctionDecl *FD = cast_or_null<FunctionDecl>(D.getDecl());
+ if (FD && !FD->hasAttr<WeakAttr>())
+ Entry->setLinkage(llvm::Function::ExternalLinkage);
+
+ WeakRefReferences.erase(Entry);
+ }
+
+ if (Entry->getType()->getElementType() == Ty)
+ return Entry;
+
+ // Make sure the result is of the correct type.
+ const llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
+ return llvm::ConstantExpr::getBitCast(Entry, PTy);
+ }
+
+ // If this function doesn't have a complete type (for example, the return
+ // type is an incomplete struct), use a fake type instead and make sure
+ // not to try to set attributes.
+ bool IsIncompleteFunction = false;
+
+ const llvm::FunctionType *FTy;
+ if (isa<llvm::FunctionType>(Ty)) {
+ FTy = cast<llvm::FunctionType>(Ty);
+ } else {
+ FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ std::vector<const llvm::Type*>(), false);
+ IsIncompleteFunction = true;
+ }
+
+ llvm::Function *F = llvm::Function::Create(FTy,
+ llvm::Function::ExternalLinkage,
+ MangledName, &getModule());
+ assert(F->getName() == MangledName && "name was uniqued!");
+ if (D.getDecl())
+ SetFunctionAttributes(D, F, IsIncompleteFunction);
+
+ // This is the first use or definition of a mangled name. If there is a
+ // deferred decl with this name, remember that we need to emit it at the end
+ // of the file.
+ llvm::StringMap<GlobalDecl>::iterator DDI = DeferredDecls.find(MangledName);
+ if (DDI != DeferredDecls.end()) {
+ // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
+ // list, and remove it from DeferredDecls (since we don't need it anymore).
+ DeferredDeclsToEmit.push_back(DDI->second);
+ DeferredDecls.erase(DDI);
+ } else if (const FunctionDecl *FD = cast_or_null<FunctionDecl>(D.getDecl())) {
+ // If this is the first reference to a C++ inline function in a class, queue up
+ // the deferred function body for emission. These are not seen as
+ // top-level declarations.
+ if (FD->isThisDeclarationADefinition() && MayDeferGeneration(FD))
+ DeferredDeclsToEmit.push_back(D);
+ // A called constructor which has no definition or declaration needs to be
+ // synthesized.
+ else if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) {
+ if (CD->isImplicit()) {
+ assert(CD->isUsed() && "Sema doesn't consider constructor as used.");
+ DeferredDeclsToEmit.push_back(D);
+ }
+ } else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD)) {
+ if (DD->isImplicit()) {
+ assert(DD->isUsed() && "Sema doesn't consider destructor as used.");
+ DeferredDeclsToEmit.push_back(D);
+ }
+ } else if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ if (MD->isCopyAssignment() && MD->isImplicit()) {
+ assert(MD->isUsed() && "Sema doesn't consider CopyAssignment as used.");
+ DeferredDeclsToEmit.push_back(D);
+ }
+ }
+ }
+
+ // Make sure the result is of the requested type.
+ if (!IsIncompleteFunction) {
+ assert(F->getType()->getElementType() == Ty);
+ return F;
+ }
+
+ const llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
+ return llvm::ConstantExpr::getBitCast(F, PTy);
+}
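+
+// Illustrative note, not part of the original source: the IsIncompleteFunction
+// path above covers declarations whose LLVM type cannot be computed yet, e.g.
+//
+//   struct S;             // incomplete type
+//   struct S getS(void);  // declared, but not callable or definable yet
+//
+// A reference to 'getS' gets a placeholder of type 'void ()', the result is
+// bitcast back to the requested pointer type, and the usual attribute setup
+// derived from the function info is skipped until a complete type is seen.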
+
+/// GetAddrOfFunction - Return the address of the given function. If Ty is
+/// non-null, then this function will use the specified type if it has to
+/// create it (this occurs when we see a definition of the function).
+llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
+ const llvm::Type *Ty) {
+ // If there was no specific requested type, just convert it now.
+ if (!Ty)
+ Ty = getTypes().ConvertType(cast<ValueDecl>(GD.getDecl())->getType());
+
+ llvm::StringRef MangledName = getMangledName(GD);
+ return GetOrCreateLLVMFunction(MangledName, Ty, GD);
+}
+
+/// CreateRuntimeFunction - Create a new runtime function with the specified
+/// type and name.
+llvm::Constant *
+CodeGenModule::CreateRuntimeFunction(const llvm::FunctionType *FTy,
+ llvm::StringRef Name) {
+ return GetOrCreateLLVMFunction(Name, FTy, GlobalDecl());
+}
+
+static bool DeclIsConstantGlobal(ASTContext &Context, const VarDecl *D) {
+ if (!D->getType().isConstant(Context) && !D->getType()->isReferenceType())
+ return false;
+ if (Context.getLangOptions().CPlusPlus &&
+ Context.getBaseElementType(D->getType())->getAs<RecordType>()) {
+ // FIXME: We should do something fancier here!
+ return false;
+ }
+ return true;
+}
+
+/// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module,
+/// create and return an llvm GlobalVariable with the specified type. If there
+/// is something in the module with the specified name, return it potentially
+/// bitcasted to the right type.
+///
+/// If D is non-null, it specifies a decl that corresponds to this. This is used
+/// to set the attributes on the global when it is first created.
+llvm::Constant *
+CodeGenModule::GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
+ const llvm::PointerType *Ty,
+ const VarDecl *D) {
+ // Lookup the entry, lazily creating it if necessary.
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (Entry) {
+ if (WeakRefReferences.count(Entry)) {
+ if (D && !D->hasAttr<WeakAttr>())
+ Entry->setLinkage(llvm::Function::ExternalLinkage);
+
+ WeakRefReferences.erase(Entry);
+ }
+
+ if (Entry->getType() == Ty)
+ return Entry;
+
+ // Make sure the result is of the correct type.
+ return llvm::ConstantExpr::getBitCast(Entry, Ty);
+ }
+
+ // This is the first use or definition of a mangled name. If there is a
+ // deferred decl with this name, remember that we need to emit it at the end
+ // of the file.
+ llvm::StringMap<GlobalDecl>::iterator DDI = DeferredDecls.find(MangledName);
+ if (DDI != DeferredDecls.end()) {
+ // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
+ // list, and remove it from DeferredDecls (since we don't need it anymore).
+ DeferredDeclsToEmit.push_back(DDI->second);
+ DeferredDecls.erase(DDI);
+ }
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), Ty->getElementType(), false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, MangledName, 0,
+ false, Ty->getAddressSpace());
+
+ // Handle things which are present even on external declarations.
+ if (D) {
+ // FIXME: This code is overly simple and should be merged with other global
+ // handling.
+ GV->setConstant(DeclIsConstantGlobal(Context, D));
+
+ // FIXME: Merge with other attribute handling code.
+ if (D->getStorageClass() == SC_PrivateExtern)
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+
+ if (D->hasAttr<WeakAttr>() ||
+ D->hasAttr<WeakImportAttr>())
+ GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+
+ GV->setThreadLocal(D->isThreadSpecified());
+ }
+
+ return GV;
+}
+
+
+/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
+/// given global variable. If Ty is non-null and if the global doesn't exist,
+/// then it will be created with the specified type instead of whatever the
+/// normal requested type would be.
+llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
+ const llvm::Type *Ty) {
+ assert(D->hasGlobalStorage() && "Not a global variable");
+ QualType ASTTy = D->getType();
+ if (Ty == 0)
+ Ty = getTypes().ConvertTypeForMem(ASTTy);
+
+ const llvm::PointerType *PTy =
+ llvm::PointerType::get(Ty, ASTTy.getAddressSpace());
+
+ llvm::StringRef MangledName = getMangledName(D);
+ return GetOrCreateLLVMGlobal(MangledName, PTy, D);
+}
+
+/// CreateRuntimeVariable - Create a new runtime global variable with the
+/// specified type and name.
+llvm::Constant *
+CodeGenModule::CreateRuntimeVariable(const llvm::Type *Ty,
+ llvm::StringRef Name) {
+ return GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), 0);
+}
+
+void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
+ assert(!D->getInit() && "Cannot emit definite definitions here!");
+
+ if (MayDeferGeneration(D)) {
+ // If we have not seen a reference to this variable yet, place it
+ // into the deferred declarations table to be emitted if needed
+ // later.
+ llvm::StringRef MangledName = getMangledName(D);
+ if (!GetGlobalValue(MangledName)) {
+ DeferredDecls[MangledName] = D;
+ return;
+ }
+ }
+
+ // The tentative definition is the only definition.
+ EmitGlobalVarDefinition(D);
+}
+
+void CodeGenModule::EmitVTable(CXXRecordDecl *Class, bool DefinitionRequired) {
+ if (DefinitionRequired)
+ getVTables().GenerateClassData(getVTableLinkage(Class), Class);
+}
+
+llvm::GlobalVariable::LinkageTypes
+CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
+ if (RD->isInAnonymousNamespace() || !RD->hasLinkage())
+ return llvm::GlobalVariable::InternalLinkage;
+
+ if (const CXXMethodDecl *KeyFunction
+ = RD->getASTContext().getKeyFunction(RD)) {
+ // If this class has a key function, use that to determine the linkage of
+ // the vtable.
+ const FunctionDecl *Def = 0;
+ if (KeyFunction->hasBody(Def))
+ KeyFunction = cast<CXXMethodDecl>(Def);
+
+ switch (KeyFunction->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ if (KeyFunction->isInlined())
+ return llvm::GlobalVariable::WeakODRLinkage;
+
+ return llvm::GlobalVariable::ExternalLinkage;
+
+ case TSK_ImplicitInstantiation:
+ case TSK_ExplicitInstantiationDefinition:
+ return llvm::GlobalVariable::WeakODRLinkage;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ // FIXME: Use available_externally linkage. However, this currently
+ // breaks LLVM's build due to undefined symbols.
+ // return llvm::GlobalVariable::AvailableExternallyLinkage;
+ return llvm::GlobalVariable::WeakODRLinkage;
+ }
+ }
+
+ switch (RD->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ case TSK_ImplicitInstantiation:
+ case TSK_ExplicitInstantiationDefinition:
+ return llvm::GlobalVariable::WeakODRLinkage;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ // FIXME: Use available_externally linkage. However, this currently
+ // breaks LLVM's build due to undefined symbols.
+ // return llvm::GlobalVariable::AvailableExternallyLinkage;
+ return llvm::GlobalVariable::WeakODRLinkage;
+ }
+
+ // Silence GCC warning.
+ return llvm::GlobalVariable::WeakODRLinkage;
+}
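+
+// Illustrative note, not part of the original source: under the key-function
+// rule above, a class such as
+//
+//   struct Widget { virtual void draw(); };  // draw() defined out of line
+//
+// gets an external-linkage vtable that is emitted only in the translation
+// unit defining Widget::draw(), while a class whose key function is inline,
+// or whose vtable comes from a template instantiation, falls back to
+// weak_odr so that every translation unit can emit an identical copy.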
+
+CharUnits CodeGenModule::GetTargetTypeStoreSize(const llvm::Type *Ty) const {
+ return CharUnits::fromQuantity(
+ TheTargetData.getTypeStoreSizeInBits(Ty) / Context.getCharWidth());
+}
+
+void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
+ llvm::Constant *Init = 0;
+ QualType ASTTy = D->getType();
+ bool NonConstInit = false;
+
+ const Expr *InitExpr = D->getAnyInitializer();
+
+ if (!InitExpr) {
+ // This is a tentative definition; tentative definitions are
+ // implicitly initialized with { 0 }.
+ //
+ // Note that tentative definitions are only emitted at the end of
+ // a translation unit, so they should never have incomplete
+ // type. In addition, EmitTentativeDefinition makes sure that we
+ // never attempt to emit a tentative definition if a real one
+ // exists. A use may still exist, however, so we still may need
+ // to do a RAUW.
+ assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
+ Init = EmitNullConstant(D->getType());
+ } else {
+ Init = EmitConstantExpr(InitExpr, D->getType());
+ if (!Init) {
+ QualType T = InitExpr->getType();
+ if (D->getType()->isReferenceType())
+ T = D->getType();
+
+ if (getLangOptions().CPlusPlus) {
+ EmitCXXGlobalVarDeclInitFunc(D);
+ Init = EmitNullConstant(T);
+ NonConstInit = true;
+ } else {
+ ErrorUnsupported(D, "static initializer");
+ Init = llvm::UndefValue::get(getTypes().ConvertType(T));
+ }
+ } else {
+ // We don't need an initializer, so remove the entry for the delayed
+ // initializer position (just in case this entry was delayed).
+ if (getLangOptions().CPlusPlus)
+ DelayedCXXInitPosition.erase(D);
+ }
+ }
+
+ const llvm::Type* InitType = Init->getType();
+ llvm::Constant *Entry = GetAddrOfGlobalVar(D, InitType);
+
+ // Strip off a bitcast if we got one back.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
+ assert(CE->getOpcode() == llvm::Instruction::BitCast ||
+ // all zero index gep.
+ CE->getOpcode() == llvm::Instruction::GetElementPtr);
+ Entry = CE->getOperand(0);
+ }
+
+ // Entry is now either a Function or GlobalVariable.
+ llvm::GlobalVariable *GV = dyn_cast<llvm::GlobalVariable>(Entry);
+
+ // We have a definition after a declaration with the wrong type.
+ // We must make a new GlobalVariable* and update everything that used OldGV
+ // (a declaration or tentative definition) with the new GlobalVariable*
+ // (which will be a definition).
+ //
+ // This happens if there is a prototype for a global (e.g.
+ // "extern int x[];") and then a definition of a different type (e.g.
+ // "int x[10];"). This also happens when an initializer has a different type
+ // from the type of the global (this happens with unions).
+ if (GV == 0 ||
+ GV->getType()->getElementType() != InitType ||
+ GV->getType()->getAddressSpace() != ASTTy.getAddressSpace()) {
+
+ // Move the old entry aside so that we'll create a new one.
+ Entry->setName(llvm::StringRef());
+
+ // Make a new global with the correct type, this is now guaranteed to work.
+ GV = cast<llvm::GlobalVariable>(GetAddrOfGlobalVar(D, InitType));
+
+ // Replace all uses of the old global with the new global
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, Entry->getType());
+ Entry->replaceAllUsesWith(NewPtrForOldDecl);
+
+ // Erase the old global, since it is no longer used.
+ cast<llvm::GlobalValue>(Entry)->eraseFromParent();
+ }
+
+ if (const AnnotateAttr *AA = D->getAttr<AnnotateAttr>()) {
+ SourceManager &SM = Context.getSourceManager();
+ AddAnnotation(EmitAnnotateAttr(GV, AA,
+ SM.getInstantiationLineNumber(D->getLocation())));
+ }
+
+ GV->setInitializer(Init);
+
+ // If it is safe to mark the global 'constant', do so now.
+ GV->setConstant(false);
+ if (!NonConstInit && DeclIsConstantGlobal(Context, D))
+ GV->setConstant(true);
+
+ GV->setAlignment(getContext().getDeclAlign(D).getQuantity());
+
+ // Set the llvm linkage type as appropriate.
+ GVALinkage Linkage = getContext().GetGVALinkageForVariable(D);
+ if (Linkage == GVA_Internal)
+ GV->setLinkage(llvm::Function::InternalLinkage);
+ else if (D->hasAttr<DLLImportAttr>())
+ GV->setLinkage(llvm::Function::DLLImportLinkage);
+ else if (D->hasAttr<DLLExportAttr>())
+ GV->setLinkage(llvm::Function::DLLExportLinkage);
+ else if (D->hasAttr<WeakAttr>()) {
+ if (GV->isConstant())
+ GV->setLinkage(llvm::GlobalVariable::WeakODRLinkage);
+ else
+ GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
+ } else if (Linkage == GVA_TemplateInstantiation ||
+ Linkage == GVA_ExplicitTemplateInstantiation)
+ // FIXME: It seems like we can provide more specific linkage here
+ // (LinkOnceODR, WeakODR).
+ GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
+ else if (!getLangOptions().CPlusPlus && !CodeGenOpts.NoCommon &&
+ !D->hasExternalStorage() && !D->getInit() &&
+ !D->getAttr<SectionAttr>() && !D->isThreadSpecified()) {
+ // Thread local vars aren't considered common linkage.
+ GV->setLinkage(llvm::GlobalVariable::CommonLinkage);
+ // common vars aren't constant even if declared const.
+ GV->setConstant(false);
+ } else
+ GV->setLinkage(llvm::GlobalVariable::ExternalLinkage);
+
+ SetCommonAttributes(D, GV);
+
+ // Emit global variable debug information.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(D->getLocation());
+ DI->EmitGlobalVariable(GV, D);
+ }
+}
+
+/// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we
+/// implement a function with no prototype, e.g. "int foo() {}". If there are
+/// existing call uses of the old function in the module, this adjusts them to
+/// call the new function directly.
+///
+/// This is not just a cleanup: the always_inline pass requires direct calls to
+/// functions to be able to inline them. If there is a bitcast in the way, it
+/// won't inline them. Instcombine normally deletes these calls, but it isn't
+/// run at -O0.
+static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
+ llvm::Function *NewFn) {
+ // If we're redefining a global as a function, don't transform it.
+ llvm::Function *OldFn = dyn_cast<llvm::Function>(Old);
+ if (OldFn == 0) return;
+
+ const llvm::Type *NewRetTy = NewFn->getReturnType();
+ llvm::SmallVector<llvm::Value*, 4> ArgList;
+
+ for (llvm::Value::use_iterator UI = OldFn->use_begin(), E = OldFn->use_end();
+ UI != E; ) {
+ // TODO: Do invokes ever occur in C code? If so, we should handle them too.
+ llvm::Value::use_iterator I = UI++; // Increment before the CI is erased.
+ llvm::CallInst *CI = dyn_cast<llvm::CallInst>(*I);
+ if (!CI) continue; // FIXME: when we allow Invoke, just do CallSite CS(*I)
+ llvm::CallSite CS(CI);
+ if (!CI || !CS.isCallee(I)) continue;
+
+ // If the return types don't match exactly, and if the call isn't dead, then
+ // we can't transform this call.
+ if (CI->getType() != NewRetTy && !CI->use_empty())
+ continue;
+
+ // If the function was passed too few arguments, don't transform. If extra
+ // arguments were passed, we silently drop them. If any of the types
+ // mismatch, we don't transform.
+ unsigned ArgNo = 0;
+ bool DontTransform = false;
+ for (llvm::Function::arg_iterator AI = NewFn->arg_begin(),
+ E = NewFn->arg_end(); AI != E; ++AI, ++ArgNo) {
+ if (CS.arg_size() == ArgNo ||
+ CS.getArgument(ArgNo)->getType() != AI->getType()) {
+ DontTransform = true;
+ break;
+ }
+ }
+ if (DontTransform)
+ continue;
+
+ // Okay, we can transform this. Create the new call instruction and copy
+ // over the required information.
+ ArgList.append(CS.arg_begin(), CS.arg_begin() + ArgNo);
+ llvm::CallInst *NewCall = llvm::CallInst::Create(NewFn, ArgList.begin(),
+ ArgList.end(), "", CI);
+ ArgList.clear();
+ if (!NewCall->getType()->isVoidTy())
+ NewCall->takeName(CI);
+ NewCall->setAttributes(CI->getAttributes());
+ NewCall->setCallingConv(CI->getCallingConv());
+
+ // Finally, remove the old call, replacing any uses with the new one.
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(NewCall);
+
+ // Copy debug location attached to CI.
+ if (!CI->getDebugLoc().isUnknown())
+ NewCall->setDebugLoc(CI->getDebugLoc());
+ CI->eraseFromParent();
+ }
+}
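+
+// Illustrative note, not part of the original source: the rewrite above is
+// for K&R-style code such as
+//
+//   int foo();                         // no prototype
+//   int bar(void) { return foo(42); }  // call emitted against the old decl
+//   int foo(int x) { return x; }       // definition supplies the real type
+//
+// Once the definition appears, the earlier call is redirected to the new
+// function; extra arguments would be dropped silently, and any argument type
+// mismatch leaves the call untouched.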
+
+
+void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
+ const FunctionDecl *D = cast<FunctionDecl>(GD.getDecl());
+ const llvm::FunctionType *Ty = getTypes().GetFunctionType(GD);
+ getCXXABI().getMangleContext().mangleInitDiscriminator();
+ // Get or create the prototype for the function.
+ llvm::Constant *Entry = GetAddrOfFunction(GD, Ty);
+
+ // Strip off a bitcast if we got one back.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
+ assert(CE->getOpcode() == llvm::Instruction::BitCast);
+ Entry = CE->getOperand(0);
+ }
+
+
+ if (cast<llvm::GlobalValue>(Entry)->getType()->getElementType() != Ty) {
+ llvm::GlobalValue *OldFn = cast<llvm::GlobalValue>(Entry);
+
+ // If the types mismatch then we have to rewrite the definition.
+ assert(OldFn->isDeclaration() &&
+ "Shouldn't replace non-declaration");
+
+ // OldFn is the Function* with the wrong type; we must make a new Function*
+ // and update everything that used it (a declaration) with the new Function*
+ // (which will be a definition).
+ //
+ // This happens if there is a prototype for a function
+ // (e.g. "int f()") and then a definition of a different type
+ // (e.g. "int f(int x)"). Move the old function aside so that it
+ // doesn't interfere with GetAddrOfFunction.
+ OldFn->setName(llvm::StringRef());
+ llvm::Function *NewFn = cast<llvm::Function>(GetAddrOfFunction(GD, Ty));
+
+ // If this is an implementation of a function without a prototype, try to
+ // replace any existing uses of the function (which may be calls) with uses
+ // of the new function
+ if (D->getType()->isFunctionNoProtoType()) {
+ ReplaceUsesOfNonProtoTypeWithRealFunction(OldFn, NewFn);
+ OldFn->removeDeadConstantUsers();
+ }
+
+ // Replace uses of the old function with the one we will endow with a body.
+ if (!Entry->use_empty()) {
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(NewFn, Entry->getType());
+ Entry->replaceAllUsesWith(NewPtrForOldDecl);
+ }
+
+ // Ok, delete the old function now, which is dead.
+ OldFn->eraseFromParent();
+
+ Entry = NewFn;
+ }
+
+ llvm::Function *Fn = cast<llvm::Function>(Entry);
+ setFunctionLinkage(D, Fn);
+
+ CodeGenFunction(*this).GenerateCode(D, Fn);
+
+ SetFunctionDefinitionAttributes(D, Fn);
+ SetLLVMFunctionAttributesForDefinition(D, Fn);
+
+ if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
+ AddGlobalCtor(Fn, CA->getPriority());
+ if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
+ AddGlobalDtor(Fn, DA->getPriority());
+}
+
+void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
+ const ValueDecl *D = cast<ValueDecl>(GD.getDecl());
+ const AliasAttr *AA = D->getAttr<AliasAttr>();
+ assert(AA && "Not an alias?");
+
+ llvm::StringRef MangledName = getMangledName(GD);
+
+ // If there is a definition in the module, then it wins over the alias.
+ // This is dubious, but allow it to be safe. Just ignore the alias.
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (Entry && !Entry->isDeclaration())
+ return;
+
+ const llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
+
+ // Create a reference to the named value. This ensures that it is emitted
+ // if it is a deferred decl.
+ llvm::Constant *Aliasee;
+ if (isa<llvm::FunctionType>(DeclTy))
+ Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GlobalDecl());
+ else
+ Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
+ llvm::PointerType::getUnqual(DeclTy), 0);
+
+ // Create the new alias itself, but don't set a name yet.
+ llvm::GlobalValue *GA =
+ new llvm::GlobalAlias(Aliasee->getType(),
+ llvm::Function::ExternalLinkage,
+ "", Aliasee, &getModule());
+
+ if (Entry) {
+ assert(Entry->isDeclaration());
+
+ // If there is a declaration in the module, then we had an extern followed
+ // by the alias, as in:
+ // extern int test6();
+ // ...
+ // int test6() __attribute__((alias("test7")));
+ //
+ // Remove it and replace uses of it with the alias.
+ GA->takeName(Entry);
+
+ Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GA,
+ Entry->getType()));
+ Entry->eraseFromParent();
+ } else {
+ GA->setName(MangledName);
+ }
+
+ // Set attributes which are particular to an alias; this is a
+ // specialization of the attributes which may be set on a global
+ // variable/function.
+ if (D->hasAttr<DLLExportAttr>()) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // The dllexport attribute is ignored for undefined symbols.
+ if (FD->hasBody())
+ GA->setLinkage(llvm::Function::DLLExportLinkage);
+ } else {
+ GA->setLinkage(llvm::Function::DLLExportLinkage);
+ }
+ } else if (D->hasAttr<WeakAttr>() ||
+ D->hasAttr<WeakRefAttr>() ||
+ D->hasAttr<WeakImportAttr>()) {
+ GA->setLinkage(llvm::Function::WeakAnyLinkage);
+ }
+
+ SetCommonAttributes(D, GA);
+}
+
+/// getBuiltinLibFunction - Given a builtin id for a function like
+/// "__builtin_fabsf", return a Function* for "fabsf".
+llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
+ unsigned BuiltinID) {
+ assert((Context.BuiltinInfo.isLibFunction(BuiltinID) ||
+ Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID)) &&
+ "isn't a lib fn");
+
+ // Get the name, skip over the __builtin_ prefix (if necessary).
+ const char *Name = Context.BuiltinInfo.GetName(BuiltinID);
+ if (Context.BuiltinInfo.isLibFunction(BuiltinID))
+ Name += 10;
+
+ const llvm::FunctionType *Ty =
+ cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
+
+ return GetOrCreateLLVMFunction(Name, Ty, GlobalDecl(FD));
+}
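// Editor's sketch (not part of this commit): a standalone illustration of the
// "Name += 10" mapping above -- library builtins carry a "__builtin_" prefix
// (10 characters), and skipping it yields the libc name that gets emitted.
#include <cassert>
#include <cstring>

int main() {
  const char *Name = "__builtin_fabsf";
  assert(std::strlen("__builtin_") == 10);
  const char *LibName = Name + 10;               // "fabsf"
  assert(std::strcmp(LibName, "fabsf") == 0);
  return 0;
}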
+
+llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,const llvm::Type **Tys,
+ unsigned NumTys) {
+ return llvm::Intrinsic::getDeclaration(&getModule(),
+ (llvm::Intrinsic::ID)IID, Tys, NumTys);
+}
+
+
+llvm::Function *CodeGenModule::getMemCpyFn(const llvm::Type *DestType,
+ const llvm::Type *SrcType,
+ const llvm::Type *SizeType) {
+ const llvm::Type *ArgTypes[3] = {DestType, SrcType, SizeType };
+ return getIntrinsic(llvm::Intrinsic::memcpy, ArgTypes, 3);
+}
+
+llvm::Function *CodeGenModule::getMemMoveFn(const llvm::Type *DestType,
+ const llvm::Type *SrcType,
+ const llvm::Type *SizeType) {
+ const llvm::Type *ArgTypes[3] = {DestType, SrcType, SizeType };
+ return getIntrinsic(llvm::Intrinsic::memmove, ArgTypes, 3);
+}
+
+llvm::Function *CodeGenModule::getMemSetFn(const llvm::Type *DestType,
+ const llvm::Type *SizeType) {
+ const llvm::Type *ArgTypes[2] = { DestType, SizeType };
+ return getIntrinsic(llvm::Intrinsic::memset, ArgTypes, 2);
+}
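// Editor's sketch (not part of this commit; assumes the LLVM 2.8-era C++ API
// used throughout this import): how a caller such as the builtin or aggregate
// expression emitters might request the i8*/i8*/i64 overload of llvm.memcpy
// through the helper defined above. EmitExampleMemCpyDecl is a hypothetical
// name used only for illustration.
static llvm::Function *EmitExampleMemCpyDecl(CodeGenModule &CGM,
                                             llvm::LLVMContext &Ctx) {
  const llvm::Type *I8Ptr = llvm::Type::getInt8PtrTy(Ctx);
  const llvm::Type *I64   = llvm::Type::getInt64Ty(Ctx);
  // Returns the declaration of the memcpy intrinsic specialized on these types.
  return CGM.getMemCpyFn(I8Ptr, I8Ptr, I64);
}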
+
+static llvm::StringMapEntry<llvm::Constant*> &
+GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
+ const StringLiteral *Literal,
+ bool TargetIsLSB,
+ bool &IsUTF16,
+ unsigned &StringLength) {
+ llvm::StringRef String = Literal->getString();
+ unsigned NumBytes = String.size();
+
+ // Check for simple case.
+ if (!Literal->containsNonAsciiOrNull()) {
+ StringLength = NumBytes;
+ return Map.GetOrCreateValue(String);
+ }
+
+ // Otherwise, convert the UTF8 literals into a byte string.
+ llvm::SmallVector<UTF16, 128> ToBuf(NumBytes);
+ const UTF8 *FromPtr = (UTF8 *)String.data();
+ UTF16 *ToPtr = &ToBuf[0];
+
+ ConversionResult Result = ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
+ &ToPtr, ToPtr + NumBytes,
+ strictConversion);
+
+ // Check for conversion failure.
+ if (Result != conversionOK) {
+ // FIXME: Have Sema::CheckObjCString() validate the UTF-8 string and remove
+ // this duplicate code.
+ assert(Result == sourceIllegal && "UTF-8 to UTF-16 conversion failed");
+ StringLength = NumBytes;
+ return Map.GetOrCreateValue(String);
+ }
+
+ // ConvertUTF8toUTF16 returns the length in ToPtr.
+ StringLength = ToPtr - &ToBuf[0];
+
+ // Render the UTF-16 string into a byte array and convert to the target byte
+ // order.
+ //
+ // FIXME: This isn't something we should need to do here.
+ llvm::SmallString<128> AsBytes;
+ AsBytes.reserve(StringLength * 2);
+ for (unsigned i = 0; i != StringLength; ++i) {
+ unsigned short Val = ToBuf[i];
+ if (TargetIsLSB) {
+ AsBytes.push_back(Val & 0xFF);
+ AsBytes.push_back(Val >> 8);
+ } else {
+ AsBytes.push_back(Val >> 8);
+ AsBytes.push_back(Val & 0xFF);
+ }
+ }
+  // Append one extra null character; the second is added automatically by our
+ // caller.
+ AsBytes.push_back(0);
+
+ IsUTF16 = true;
+ return Map.GetOrCreateValue(llvm::StringRef(AsBytes.data(), AsBytes.size()));
+}
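// Editor's note (standalone illustration, not part of this commit): the
// byte-order handling above writes each UTF-16 code unit low byte first on
// little-endian targets and high byte first otherwise.
#include <cassert>
#include <vector>

static void PushUTF16(std::vector<unsigned char> &Out, unsigned short Val,
                      bool TargetIsLSB) {
  if (TargetIsLSB) {
    Out.push_back(Val & 0xFF);   // low byte first (little-endian target)
    Out.push_back(Val >> 8);
  } else {
    Out.push_back(Val >> 8);     // high byte first (big-endian target)
    Out.push_back(Val & 0xFF);
  }
}

int main() {
  std::vector<unsigned char> LE, BE;
  PushUTF16(LE, 0x00E9, true);   // U+00E9 rendered for a little-endian target
  PushUTF16(BE, 0x00E9, false);  // ... and for a big-endian target
  assert(LE[0] == 0xE9 && LE[1] == 0x00);
  assert(BE[0] == 0x00 && BE[1] == 0xE9);
  return 0;
}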
+
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
+ unsigned StringLength = 0;
+ bool isUTF16 = false;
+ llvm::StringMapEntry<llvm::Constant*> &Entry =
+ GetConstantCFStringEntry(CFConstantStringMap, Literal,
+ getTargetData().isLittleEndian(),
+ isUTF16, StringLength);
+
+ if (llvm::Constant *C = Entry.getValue())
+ return C;
+
+ llvm::Constant *Zero =
+ llvm::Constant::getNullValue(llvm::Type::getInt32Ty(VMContext));
+ llvm::Constant *Zeros[] = { Zero, Zero };
+
+ // If we don't already have it, get __CFConstantStringClassReference.
+ if (!CFConstantStringClassRef) {
+ const llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
+ Ty = llvm::ArrayType::get(Ty, 0);
+ llvm::Constant *GV = CreateRuntimeVariable(Ty,
+ "__CFConstantStringClassReference");
+ // Decay array -> ptr
+ CFConstantStringClassRef =
+ llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
+ }
+
+ QualType CFTy = getContext().getCFConstantStringType();
+
+ const llvm::StructType *STy =
+ cast<llvm::StructType>(getTypes().ConvertType(CFTy));
+
+ std::vector<llvm::Constant*> Fields(4);
+
+ // Class pointer.
+ Fields[0] = CFConstantStringClassRef;
+
+ // Flags.
+ const llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
+ Fields[1] = isUTF16 ? llvm::ConstantInt::get(Ty, 0x07d0) :
+ llvm::ConstantInt::get(Ty, 0x07C8);
+
+ // String pointer.
+ llvm::Constant *C = llvm::ConstantArray::get(VMContext, Entry.getKey().str());
+
+ llvm::GlobalValue::LinkageTypes Linkage;
+ bool isConstant;
+ if (isUTF16) {
+ // FIXME: why do utf strings get "_" labels instead of "L" labels?
+ Linkage = llvm::GlobalValue::InternalLinkage;
+ // Note: -fwritable-strings doesn't make unicode CFStrings writable, but
+ // does make plain ascii ones writable.
+ isConstant = true;
+ } else {
+ Linkage = llvm::GlobalValue::PrivateLinkage;
+ isConstant = !Features.WritableStrings;
+ }
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), C->getType(), isConstant, Linkage, C,
+ ".str");
+ if (isUTF16) {
+ CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy);
+ GV->setAlignment(Align.getQuantity());
+ }
+ Fields[2] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
+
+ // String length.
+ Ty = getTypes().ConvertType(getContext().LongTy);
+ Fields[3] = llvm::ConstantInt::get(Ty, StringLength);
+
+ // The struct.
+ C = llvm::ConstantStruct::get(STy, Fields);
+ GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
+ llvm::GlobalVariable::PrivateLinkage, C,
+ "_unnamed_cfstring_");
+ if (const char *Sect = getContext().Target.getCFStringSection())
+ GV->setSection(Sect);
+ Entry.setValue(GV);
+
+ return GV;
+}
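// Editor's sketch (not part of this commit): the constant emitted above has
// the shape of the CoreFoundation constant-string header. The field names
// below are illustrative only; the real layout is whatever
// ASTContext::getCFConstantStringType() describes.
struct CFConstantStringSketch {
  const int *isa;      // Fields[0]: &__CFConstantStringClassReference
  unsigned flags;      // Fields[1]: 0x07C8 for ASCII data, 0x07D0 for UTF-16
  const char *str;     // Fields[2]: pointer to the ".str" global built above
  long length;         // Fields[3]: length in characters (UTF-16 units if any)
};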
+
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantNSString(const StringLiteral *Literal) {
+ unsigned StringLength = 0;
+ bool isUTF16 = false;
+ llvm::StringMapEntry<llvm::Constant*> &Entry =
+ GetConstantCFStringEntry(CFConstantStringMap, Literal,
+ getTargetData().isLittleEndian(),
+ isUTF16, StringLength);
+
+ if (llvm::Constant *C = Entry.getValue())
+ return C;
+
+ llvm::Constant *Zero =
+ llvm::Constant::getNullValue(llvm::Type::getInt32Ty(VMContext));
+ llvm::Constant *Zeros[] = { Zero, Zero };
+
+ // If we don't already have it, get _NSConstantStringClassReference.
+ if (!NSConstantStringClassRef) {
+ const llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
+ Ty = llvm::ArrayType::get(Ty, 0);
+ llvm::Constant *GV = CreateRuntimeVariable(Ty,
+ Features.ObjCNonFragileABI ?
+ "OBJC_CLASS_$_NSConstantString" :
+ "_NSConstantStringClassReference");
+ // Decay array -> ptr
+ NSConstantStringClassRef =
+ llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
+ }
+
+ QualType NSTy = getContext().getNSConstantStringType();
+
+ const llvm::StructType *STy =
+ cast<llvm::StructType>(getTypes().ConvertType(NSTy));
+
+ std::vector<llvm::Constant*> Fields(3);
+
+ // Class pointer.
+ Fields[0] = NSConstantStringClassRef;
+
+ // String pointer.
+ llvm::Constant *C = llvm::ConstantArray::get(VMContext, Entry.getKey().str());
+
+ llvm::GlobalValue::LinkageTypes Linkage;
+ bool isConstant;
+ if (isUTF16) {
+ // FIXME: why do utf strings get "_" labels instead of "L" labels?
+ Linkage = llvm::GlobalValue::InternalLinkage;
+ // Note: -fwritable-strings doesn't make unicode NSStrings writable, but
+ // does make plain ascii ones writable.
+ isConstant = true;
+ } else {
+ Linkage = llvm::GlobalValue::PrivateLinkage;
+ isConstant = !Features.WritableStrings;
+ }
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), C->getType(), isConstant, Linkage, C,
+ ".str");
+ if (isUTF16) {
+ CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy);
+ GV->setAlignment(Align.getQuantity());
+ }
+ Fields[1] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
+
+ // String length.
+ const llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
+ Fields[2] = llvm::ConstantInt::get(Ty, StringLength);
+
+ // The struct.
+ C = llvm::ConstantStruct::get(STy, Fields);
+ GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
+ llvm::GlobalVariable::PrivateLinkage, C,
+ "_unnamed_nsstring_");
+ // FIXME. Fix section.
+ if (const char *Sect =
+ Features.ObjCNonFragileABI
+ ? getContext().Target.getNSStringNonFragileABISection()
+ : getContext().Target.getNSStringSection())
+ GV->setSection(Sect);
+ Entry.setValue(GV);
+
+ return GV;
+}
+
+/// GetStringForStringLiteral - Return the appropriate bytes for a
+/// string literal, properly padded to match the literal type.
+std::string CodeGenModule::GetStringForStringLiteral(const StringLiteral *E) {
+ const ConstantArrayType *CAT =
+ getContext().getAsConstantArrayType(E->getType());
+ assert(CAT && "String isn't pointer or array!");
+
+ // Resize the string to the right size.
+ uint64_t RealLen = CAT->getSize().getZExtValue();
+
+ if (E->isWide())
+ RealLen *= getContext().Target.getWCharWidth()/8;
+
+ std::string Str = E->getString().str();
+ Str.resize(RealLen, '\0');
+
+ return Str;
+}
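// Editor's note (standalone illustration, not part of this commit): the
// padding rule above, e.g. for `char buf[8] = "hi";` the emitted bytes are
// the literal extended with '\0' out to the declared array size.
#include <cassert>
#include <stdint.h>
#include <string>

int main() {
  std::string Str = "hi";            // E->getString()
  const uint64_t RealLen = 8;        // CAT->getSize() for `char buf[8]`
  Str.resize(RealLen, '\0');         // pad with NULs to the array length
  assert(Str.size() == 8 && Str[2] == '\0' && Str[7] == '\0');
  return 0;
}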
+
+/// GetAddrOfConstantStringFromLiteral - Return a pointer to a
+/// constant array for the given string literal.
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S) {
+ // FIXME: This can be more efficient.
+ // FIXME: We shouldn't need to bitcast the constant in the wide string case.
+ llvm::Constant *C = GetAddrOfConstantString(GetStringForStringLiteral(S));
+ if (S->isWide()) {
+ llvm::Type *DestTy =
+ llvm::PointerType::getUnqual(getTypes().ConvertType(S->getType()));
+ C = llvm::ConstantExpr::getBitCast(C, DestTy);
+ }
+ return C;
+}
+
+/// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
+/// array for the given ObjCEncodeExpr node.
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
+ std::string Str;
+ getContext().getObjCEncodingForType(E->getEncodedType(), Str);
+
+ return GetAddrOfConstantCString(Str);
+}
+
+
+/// GenerateStringLiteral -- Creates storage for a string literal.
+static llvm::Constant *GenerateStringLiteral(const std::string &str,
+ bool constant,
+ CodeGenModule &CGM,
+ const char *GlobalName) {
+ // Create Constant for this string literal. Don't add a '\0'.
+ llvm::Constant *C =
+ llvm::ConstantArray::get(CGM.getLLVMContext(), str, false);
+
+ // Create a global variable for this string
+ return new llvm::GlobalVariable(CGM.getModule(), C->getType(), constant,
+ llvm::GlobalValue::PrivateLinkage,
+ C, GlobalName);
+}
+
+/// GetAddrOfConstantString - Returns a pointer to a character array
+/// containing the literal. The contents are exactly those of the
+/// given string, i.e. it will not be null terminated automatically;
+/// see GetAddrOfConstantCString. Note that whether the result is
+/// actually a pointer to an LLVM constant depends on
+/// Features.WritableStrings.
+///
+/// The result has pointer to array type.
+llvm::Constant *CodeGenModule::GetAddrOfConstantString(const std::string &str,
+ const char *GlobalName) {
+ bool IsConstant = !Features.WritableStrings;
+
+ // Get the default prefix if a name wasn't specified.
+ if (!GlobalName)
+ GlobalName = ".str";
+
+ // Don't share any string literals if strings aren't constant.
+ if (!IsConstant)
+ return GenerateStringLiteral(str, false, *this, GlobalName);
+
+ llvm::StringMapEntry<llvm::Constant *> &Entry =
+ ConstantStringMap.GetOrCreateValue(&str[0], &str[str.length()]);
+
+ if (Entry.getValue())
+ return Entry.getValue();
+
+ // Create a global variable for this.
+ llvm::Constant *C = GenerateStringLiteral(str, true, *this, GlobalName);
+ Entry.setValue(C);
+ return C;
+}
+
+/// GetAddrOfConstantCString - Returns a pointer to a character
+/// array containing the literal and a terminating '\0'
+/// character. The result has pointer to array type.
+llvm::Constant *CodeGenModule::GetAddrOfConstantCString(const std::string &str,
+ const char *GlobalName){
+ return GetAddrOfConstantString(str + '\0', GlobalName);
+}
+
+/// EmitObjCPropertyImplementations - Emit information for synthesized
+/// properties for an implementation.
+void CodeGenModule::EmitObjCPropertyImplementations(const
+ ObjCImplementationDecl *D) {
+ for (ObjCImplementationDecl::propimpl_iterator
+ i = D->propimpl_begin(), e = D->propimpl_end(); i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+
+ // Dynamic is just for type-checking.
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+      // Determine which methods need to be implemented; some may have
+      // been overridden. Note that ::isSynthesized is not the method
+      // we want, since that just indicates whether the decl came from a
+      // property. What we want to know is whether the method is defined in
+ // this implementation.
+ if (!D->getInstanceMethod(PD->getGetterName()))
+ CodeGenFunction(*this).GenerateObjCGetter(
+ const_cast<ObjCImplementationDecl *>(D), PID);
+ if (!PD->isReadOnly() &&
+ !D->getInstanceMethod(PD->getSetterName()))
+ CodeGenFunction(*this).GenerateObjCSetter(
+ const_cast<ObjCImplementationDecl *>(D), PID);
+ }
+ }
+}
+
+/// EmitObjCIvarInitializations - Emit information for ivar initialization
+/// for an implementation.
+void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
+ if (!Features.NeXTRuntime || D->getNumIvarInitializers() == 0)
+ return;
+ DeclContext* DC = const_cast<DeclContext*>(dyn_cast<DeclContext>(D));
+ assert(DC && "EmitObjCIvarInitializations - null DeclContext");
+ IdentifierInfo *II = &getContext().Idents.get(".cxx_destruct");
+ Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
+ ObjCMethodDecl *DTORMethod = ObjCMethodDecl::Create(getContext(),
+ D->getLocation(),
+ D->getLocation(), cxxSelector,
+ getContext().VoidTy, 0,
+ DC, true, false, true, false,
+ ObjCMethodDecl::Required);
+ D->addInstanceMethod(DTORMethod);
+ CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false);
+
+ II = &getContext().Idents.get(".cxx_construct");
+ cxxSelector = getContext().Selectors.getSelector(0, &II);
+ // The constructor returns 'self'.
+ ObjCMethodDecl *CTORMethod = ObjCMethodDecl::Create(getContext(),
+ D->getLocation(),
+ D->getLocation(), cxxSelector,
+ getContext().getObjCIdType(), 0,
+ DC, true, false, true, false,
+ ObjCMethodDecl::Required);
+ D->addInstanceMethod(CTORMethod);
+ CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, CTORMethod, true);
+
+}
+
+/// EmitNamespace - Emit all declarations in a namespace.
+void CodeGenModule::EmitNamespace(const NamespaceDecl *ND) {
+ for (RecordDecl::decl_iterator I = ND->decls_begin(), E = ND->decls_end();
+ I != E; ++I)
+ EmitTopLevelDecl(*I);
+}
+
+// EmitLinkageSpec - Emit all declarations in a linkage spec.
+void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
+ if (LSD->getLanguage() != LinkageSpecDecl::lang_c &&
+ LSD->getLanguage() != LinkageSpecDecl::lang_cxx) {
+ ErrorUnsupported(LSD, "linkage spec");
+ return;
+ }
+
+ for (RecordDecl::decl_iterator I = LSD->decls_begin(), E = LSD->decls_end();
+ I != E; ++I)
+ EmitTopLevelDecl(*I);
+}
+
+/// EmitTopLevelDecl - Emit code for a single top level declaration.
+void CodeGenModule::EmitTopLevelDecl(Decl *D) {
+ // If an error has occurred, stop code generation, but continue
+ // parsing and semantic analysis (to ensure all warnings and errors
+ // are emitted).
+ if (Diags.hasErrorOccurred())
+ return;
+
+ // Ignore dependent declarations.
+ if (D->getDeclContext() && D->getDeclContext()->isDependentContext())
+ return;
+
+ switch (D->getKind()) {
+ case Decl::CXXConversion:
+ case Decl::CXXMethod:
+ case Decl::Function:
+ // Skip function templates
+ if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate())
+ return;
+
+ EmitGlobal(cast<FunctionDecl>(D));
+ break;
+
+ case Decl::Var:
+ EmitGlobal(cast<VarDecl>(D));
+ break;
+
+ // C++ Decls
+ case Decl::Namespace:
+ EmitNamespace(cast<NamespaceDecl>(D));
+ break;
+ // No code generation needed.
+ case Decl::UsingShadow:
+ case Decl::Using:
+ case Decl::UsingDirective:
+ case Decl::ClassTemplate:
+ case Decl::FunctionTemplate:
+ case Decl::NamespaceAlias:
+ break;
+ case Decl::CXXConstructor:
+ // Skip function templates
+ if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate())
+ return;
+
+ EmitCXXConstructors(cast<CXXConstructorDecl>(D));
+ break;
+ case Decl::CXXDestructor:
+ EmitCXXDestructors(cast<CXXDestructorDecl>(D));
+ break;
+
+ case Decl::StaticAssert:
+ // Nothing to do.
+ break;
+
+ // Objective-C Decls
+
+ // Forward declarations, no (immediate) code generation.
+ case Decl::ObjCClass:
+ case Decl::ObjCForwardProtocol:
+ case Decl::ObjCInterface:
+ break;
+
+ case Decl::ObjCCategory: {
+ ObjCCategoryDecl *CD = cast<ObjCCategoryDecl>(D);
+ if (CD->IsClassExtension() && CD->hasSynthBitfield())
+ Context.ResetObjCLayout(CD->getClassInterface());
+ break;
+ }
+
+
+ case Decl::ObjCProtocol:
+ Runtime->GenerateProtocol(cast<ObjCProtocolDecl>(D));
+ break;
+
+ case Decl::ObjCCategoryImpl:
+ // Categories have properties but don't support synthesize so we
+ // can ignore them here.
+ Runtime->GenerateCategory(cast<ObjCCategoryImplDecl>(D));
+ break;
+
+ case Decl::ObjCImplementation: {
+ ObjCImplementationDecl *OMD = cast<ObjCImplementationDecl>(D);
+ if (Features.ObjCNonFragileABI2 && OMD->hasSynthBitfield())
+ Context.ResetObjCLayout(OMD->getClassInterface());
+ EmitObjCPropertyImplementations(OMD);
+ EmitObjCIvarInitializations(OMD);
+ Runtime->GenerateClass(OMD);
+ break;
+ }
+ case Decl::ObjCMethod: {
+ ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(D);
+ // If this is not a prototype, emit the body.
+ if (OMD->getBody())
+ CodeGenFunction(*this).GenerateObjCMethod(OMD);
+ break;
+ }
+ case Decl::ObjCCompatibleAlias:
+ // compatibility-alias is a directive and has no code gen.
+ break;
+
+ case Decl::LinkageSpec:
+ EmitLinkageSpec(cast<LinkageSpecDecl>(D));
+ break;
+
+ case Decl::FileScopeAsm: {
+ FileScopeAsmDecl *AD = cast<FileScopeAsmDecl>(D);
+ llvm::StringRef AsmString = AD->getAsmString()->getString();
+
+ const std::string &S = getModule().getModuleInlineAsm();
+ if (S.empty())
+ getModule().setModuleInlineAsm(AsmString);
+ else
+ getModule().setModuleInlineAsm(S + '\n' + AsmString.str());
+ break;
+ }
+
+ default:
+    // Make sure we handled everything we should; every other kind is a
+ // non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind
+ // function. Need to recode Decl::Kind to do that easily.
+ assert(isa<TypeDecl>(D) && "Unsupported decl kind");
+ }
+}
+
+/// Turns the given pointer into a constant.
+static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context,
+ const void *Ptr) {
+ uintptr_t PtrInt = reinterpret_cast<uintptr_t>(Ptr);
+ const llvm::Type *i64 = llvm::Type::getInt64Ty(Context);
+ return llvm::ConstantInt::get(i64, PtrInt);
+}
+
+static void EmitGlobalDeclMetadata(CodeGenModule &CGM,
+ llvm::NamedMDNode *&GlobalMetadata,
+ GlobalDecl D,
+ llvm::GlobalValue *Addr) {
+ if (!GlobalMetadata)
+ GlobalMetadata =
+ CGM.getModule().getOrInsertNamedMetadata("clang.global.decl.ptrs");
+
+ // TODO: should we report variant information for ctors/dtors?
+ llvm::Value *Ops[] = {
+ Addr,
+ GetPointerConstant(CGM.getLLVMContext(), D.getDecl())
+ };
+ GlobalMetadata->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops, 2));
+}
+
+/// Emits metadata nodes associating all the global values in the
+/// current module with the Decls they came from. This is useful for
+/// projects using IR gen as a subroutine.
+///
+/// Since there's currently no way to associate an MDNode directly
+/// with an llvm::GlobalValue, we create a global named metadata
+/// with the name 'clang.global.decl.ptrs'.
+void CodeGenModule::EmitDeclMetadata() {
+ llvm::NamedMDNode *GlobalMetadata = 0;
+
+  // Walk MangledDeclNames and emit an entry for each named global value.
+ for (llvm::DenseMap<GlobalDecl,llvm::StringRef>::iterator
+ I = MangledDeclNames.begin(), E = MangledDeclNames.end();
+ I != E; ++I) {
+ llvm::GlobalValue *Addr = getModule().getNamedValue(I->second);
+ EmitGlobalDeclMetadata(*this, GlobalMetadata, I->first, Addr);
+ }
+}
+
+/// Emits metadata nodes for all the local variables in the current
+/// function.
+void CodeGenFunction::EmitDeclMetadata() {
+ if (LocalDeclMap.empty()) return;
+
+ llvm::LLVMContext &Context = getLLVMContext();
+
+ // Find the unique metadata ID for this name.
+ unsigned DeclPtrKind = Context.getMDKindID("clang.decl.ptr");
+
+ llvm::NamedMDNode *GlobalMetadata = 0;
+
+ for (llvm::DenseMap<const Decl*, llvm::Value*>::iterator
+ I = LocalDeclMap.begin(), E = LocalDeclMap.end(); I != E; ++I) {
+ const Decl *D = I->first;
+ llvm::Value *Addr = I->second;
+
+ if (llvm::AllocaInst *Alloca = dyn_cast<llvm::AllocaInst>(Addr)) {
+ llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D);
+ Alloca->setMetadata(DeclPtrKind, llvm::MDNode::get(Context, &DAddr, 1));
+ } else if (llvm::GlobalValue *GV = dyn_cast<llvm::GlobalValue>(Addr)) {
+ GlobalDecl GD = GlobalDecl(cast<VarDecl>(D));
+ EmitGlobalDeclMetadata(CGM, GlobalMetadata, GD, GV);
+ }
+ }
+}
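// Editor's sketch (not part of this commit; assumes the LLVM 2.8-era metadata
// API): how a client embedding IR gen might walk the "clang.global.decl.ptrs"
// named metadata emitted above and recover the pointer value of the Decl each
// global came from. DumpGlobalDeclPtrs is a hypothetical name for illustration.
static void DumpGlobalDeclPtrs(llvm::Module &M) {
  llvm::NamedMDNode *MD = M.getNamedMetadata("clang.global.decl.ptrs");
  if (!MD) return;
  for (unsigned i = 0, e = MD->getNumOperands(); i != e; ++i) {
    llvm::MDNode *Pair = MD->getOperand(i);
    llvm::GlobalValue *GV =
      llvm::cast<llvm::GlobalValue>(Pair->getOperand(0));
    llvm::ConstantInt *PtrInt =
      llvm::cast<llvm::ConstantInt>(Pair->getOperand(1));
    // The i64 holds the Decl* that GetPointerConstant packed in above.
    const void *DeclPtr =
      reinterpret_cast<const void*>((uintptr_t)PtrInt->getZExtValue());
    (void)GV; (void)DeclPtr;   // e.g. map GV->getName() back to its Decl
  }
}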
+
+///@name Custom Runtime Function Interfaces
+///@{
+//
+// FIXME: These can be eliminated once we can have clients just get the required
+// AST nodes from the builtin tables.
+
+llvm::Constant *CodeGenModule::getBlockObjectDispose() {
+ if (BlockObjectDispose)
+ return BlockObjectDispose;
+
+ // If we saw an explicit decl, use that.
+ if (BlockObjectDisposeDecl) {
+ return BlockObjectDispose = GetAddrOfFunction(
+ BlockObjectDisposeDecl,
+ getTypes().GetFunctionType(BlockObjectDisposeDecl));
+ }
+
+ // Otherwise construct the function by hand.
+ const llvm::FunctionType *FTy;
+ std::vector<const llvm::Type*> ArgTys;
+ const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
+ ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
+ FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
+ return BlockObjectDispose =
+ CreateRuntimeFunction(FTy, "_Block_object_dispose");
+}
+
+llvm::Constant *CodeGenModule::getBlockObjectAssign() {
+ if (BlockObjectAssign)
+ return BlockObjectAssign;
+
+ // If we saw an explicit decl, use that.
+ if (BlockObjectAssignDecl) {
+ return BlockObjectAssign = GetAddrOfFunction(
+ BlockObjectAssignDecl,
+ getTypes().GetFunctionType(BlockObjectAssignDecl));
+ }
+
+ // Otherwise construct the function by hand.
+ const llvm::FunctionType *FTy;
+ std::vector<const llvm::Type*> ArgTys;
+ const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
+ ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
+ FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
+ return BlockObjectAssign =
+ CreateRuntimeFunction(FTy, "_Block_object_assign");
+}
+
+llvm::Constant *CodeGenModule::getNSConcreteGlobalBlock() {
+ if (NSConcreteGlobalBlock)
+ return NSConcreteGlobalBlock;
+
+ // If we saw an explicit decl, use that.
+ if (NSConcreteGlobalBlockDecl) {
+ return NSConcreteGlobalBlock = GetAddrOfGlobalVar(
+ NSConcreteGlobalBlockDecl,
+ getTypes().ConvertType(NSConcreteGlobalBlockDecl->getType()));
+ }
+
+ // Otherwise construct the variable by hand.
+ return NSConcreteGlobalBlock = CreateRuntimeVariable(
+ PtrToInt8Ty, "_NSConcreteGlobalBlock");
+}
+
+llvm::Constant *CodeGenModule::getNSConcreteStackBlock() {
+ if (NSConcreteStackBlock)
+ return NSConcreteStackBlock;
+
+ // If we saw an explicit decl, use that.
+ if (NSConcreteStackBlockDecl) {
+ return NSConcreteStackBlock = GetAddrOfGlobalVar(
+ NSConcreteStackBlockDecl,
+ getTypes().ConvertType(NSConcreteStackBlockDecl->getType()));
+ }
+
+ // Otherwise construct the variable by hand.
+ return NSConcreteStackBlock = CreateRuntimeVariable(
+ PtrToInt8Ty, "_NSConcreteStackBlock");
+}
+
+///@}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
new file mode 100644
index 0000000..cabff9e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
@@ -0,0 +1,620 @@
+//===--- CodeGenModule.h - Per-Module state for LLVM CodeGen ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal per-translation-unit state used for llvm translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENMODULE_H
+#define CLANG_CODEGEN_CODEGENMODULE_H
+
+#include "clang/Basic/LangOptions.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "CGBlocks.h"
+#include "CGCall.h"
+#include "CGCXX.h"
+#include "CGVTables.h"
+#include "CodeGenTypes.h"
+#include "GlobalDecl.h"
+#include "Mangle.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/ValueHandle.h"
+
+namespace llvm {
+ class Module;
+ class Constant;
+ class Function;
+ class GlobalValue;
+ class TargetData;
+ class FunctionType;
+ class LLVMContext;
+}
+
+namespace clang {
+ class TargetCodeGenInfo;
+ class ASTContext;
+ class FunctionDecl;
+ class IdentifierInfo;
+ class ObjCMethodDecl;
+ class ObjCImplementationDecl;
+ class ObjCCategoryImplDecl;
+ class ObjCProtocolDecl;
+ class ObjCEncodeExpr;
+ class BlockExpr;
+ class CharUnits;
+ class Decl;
+ class Expr;
+ class Stmt;
+ class StringLiteral;
+ class NamedDecl;
+ class ValueDecl;
+ class VarDecl;
+ class LangOptions;
+ class CodeGenOptions;
+ class Diagnostic;
+ class AnnotateAttr;
+ class CXXDestructorDecl;
+
+namespace CodeGen {
+
+ class CodeGenFunction;
+ class CGCXXABI;
+ class CGDebugInfo;
+ class CGObjCRuntime;
+ class MangleBuffer;
+
+ struct OrderGlobalInits {
+ unsigned int priority;
+ unsigned int lex_order;
+ OrderGlobalInits(unsigned int p, unsigned int l)
+ : priority(p), lex_order(l) {}
+
+ bool operator==(const OrderGlobalInits &RHS) const {
+ return priority == RHS.priority &&
+ lex_order == RHS.lex_order;
+ }
+
+ bool operator<(const OrderGlobalInits &RHS) const {
+ if (priority < RHS.priority)
+ return true;
+
+ return priority == RHS.priority && lex_order < RHS.lex_order;
+ }
+ };
+
+/// CodeGenModule - This class organizes the cross-function state that is used
+/// while generating LLVM code.
+class CodeGenModule : public BlockModule {
+ CodeGenModule(const CodeGenModule&); // DO NOT IMPLEMENT
+ void operator=(const CodeGenModule&); // DO NOT IMPLEMENT
+
+ typedef std::vector<std::pair<llvm::Constant*, int> > CtorList;
+
+ ASTContext &Context;
+ const LangOptions &Features;
+ const CodeGenOptions &CodeGenOpts;
+ llvm::Module &TheModule;
+ const llvm::TargetData &TheTargetData;
+ mutable const TargetCodeGenInfo *TheTargetCodeGenInfo;
+ Diagnostic &Diags;
+ CGCXXABI &ABI;
+ CodeGenTypes Types;
+
+ /// VTables - Holds information about C++ vtables.
+ CodeGenVTables VTables;
+ friend class CodeGenVTables;
+
+ CGObjCRuntime* Runtime;
+ CGDebugInfo* DebugInfo;
+
+ // WeakRefReferences - A set of references that have only been seen via
+  // a weakref so far. This is used to drop the weak linkage of the reference
+  // if we ever see a direct reference or a definition.
+ llvm::SmallPtrSet<llvm::GlobalValue*, 10> WeakRefReferences;
+
+ /// DeferredDecls - This contains all the decls which have definitions but
+ /// which are deferred for emission and therefore should only be output if
+ /// they are actually used. If a decl is in this, then it is known to have
+ /// not been referenced yet.
+ llvm::StringMap<GlobalDecl> DeferredDecls;
+
+ /// DeferredDeclsToEmit - This is a list of deferred decls which we have seen
+ /// that *are* actually referenced. These get code generated when the module
+ /// is done.
+ std::vector<GlobalDecl> DeferredDeclsToEmit;
+
+ /// LLVMUsed - List of global values which are required to be
+ /// present in the object file; bitcast to i8*. This is used for
+ /// forcing visibility of symbols which may otherwise be optimized
+ /// out.
+ std::vector<llvm::WeakVH> LLVMUsed;
+
+ /// GlobalCtors - Store the list of global constructors and their respective
+ /// priorities to be emitted when the translation unit is complete.
+ CtorList GlobalCtors;
+
+ /// GlobalDtors - Store the list of global destructors and their respective
+ /// priorities to be emitted when the translation unit is complete.
+ CtorList GlobalDtors;
+
+ /// MangledDeclNames - A map of canonical GlobalDecls to their mangled names.
+ llvm::DenseMap<GlobalDecl, llvm::StringRef> MangledDeclNames;
+ llvm::BumpPtrAllocator MangledNamesAllocator;
+
+ std::vector<llvm::Constant*> Annotations;
+
+ llvm::StringMap<llvm::Constant*> CFConstantStringMap;
+ llvm::StringMap<llvm::Constant*> ConstantStringMap;
+ llvm::DenseMap<const Decl*, llvm::Value*> StaticLocalDeclMap;
+
+ /// CXXGlobalInits - Global variables with initializers that need to run
+ /// before main.
+ std::vector<llvm::Constant*> CXXGlobalInits;
+
+ /// When a C++ decl with an initializer is deferred, null is
+ /// appended to CXXGlobalInits, and the index of that null is placed
+ /// here so that the initializer will be performed in the correct
+ /// order.
+ llvm::DenseMap<const Decl*, unsigned> DelayedCXXInitPosition;
+
+  /// PrioritizedCXXGlobalInits - Global variables with initializers whose
+  /// order of initialization is set by the init_priority attribute.
+
+ llvm::SmallVector<std::pair<OrderGlobalInits, llvm::Function*>, 8>
+ PrioritizedCXXGlobalInits;
+
+ /// CXXGlobalDtors - Global destructor functions and arguments that need to
+ /// run on termination.
+ std::vector<std::pair<llvm::WeakVH,llvm::Constant*> > CXXGlobalDtors;
+
+ /// CFConstantStringClassRef - Cached reference to the class for constant
+ /// strings. This value has type int * but is actually an Obj-C class pointer.
+ llvm::Constant *CFConstantStringClassRef;
+
+ /// NSConstantStringClassRef - Cached reference to the class for constant
+ /// strings. This value has type int * but is actually an Obj-C class pointer.
+ llvm::Constant *NSConstantStringClassRef;
+
+ /// Lazily create the Objective-C runtime
+ void createObjCRuntime();
+
+ llvm::LLVMContext &VMContext;
+
+ /// @name Cache for Blocks Runtime Globals
+ /// @{
+
+ const VarDecl *NSConcreteGlobalBlockDecl;
+ const VarDecl *NSConcreteStackBlockDecl;
+ llvm::Constant *NSConcreteGlobalBlock;
+ llvm::Constant *NSConcreteStackBlock;
+
+ const FunctionDecl *BlockObjectAssignDecl;
+ const FunctionDecl *BlockObjectDisposeDecl;
+ llvm::Constant *BlockObjectAssign;
+ llvm::Constant *BlockObjectDispose;
+
+ /// @}
+public:
+ CodeGenModule(ASTContext &C, const CodeGenOptions &CodeGenOpts,
+ llvm::Module &M, const llvm::TargetData &TD, Diagnostic &Diags);
+
+ ~CodeGenModule();
+
+ /// Release - Finalize LLVM code generation.
+ void Release();
+
+ /// getObjCRuntime() - Return a reference to the configured
+ /// Objective-C runtime.
+ CGObjCRuntime &getObjCRuntime() {
+ if (!Runtime) createObjCRuntime();
+ return *Runtime;
+ }
+
+ /// hasObjCRuntime() - Return true iff an Objective-C runtime has
+ /// been configured.
+ bool hasObjCRuntime() { return !!Runtime; }
+
+ /// getCXXABI() - Return a reference to the configured C++ ABI.
+ CGCXXABI &getCXXABI() { return ABI; }
+
+ llvm::Value *getStaticLocalDeclAddress(const VarDecl *VD) {
+ return StaticLocalDeclMap[VD];
+ }
+ void setStaticLocalDeclAddress(const VarDecl *D,
+ llvm::GlobalVariable *GV) {
+ StaticLocalDeclMap[D] = GV;
+ }
+
+ CGDebugInfo *getDebugInfo() { return DebugInfo; }
+ ASTContext &getContext() const { return Context; }
+ const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; }
+ const LangOptions &getLangOptions() const { return Features; }
+ llvm::Module &getModule() const { return TheModule; }
+ CodeGenTypes &getTypes() { return Types; }
+ CodeGenVTables &getVTables() { return VTables; }
+ Diagnostic &getDiags() const { return Diags; }
+ const llvm::TargetData &getTargetData() const { return TheTargetData; }
+ llvm::LLVMContext &getLLVMContext() { return VMContext; }
+ const TargetCodeGenInfo &getTargetCodeGenInfo();
+ bool isTargetDarwin() const;
+
+ /// getDeclVisibilityMode - Compute the visibility of the decl \arg D.
+ LangOptions::VisibilityMode getDeclVisibilityMode(const Decl *D) const;
+
+ /// setGlobalVisibility - Set the visibility for the given LLVM
+ /// GlobalValue.
+ void setGlobalVisibility(llvm::GlobalValue *GV, const Decl *D) const;
+
+ /// setTypeVisibility - Set the visibility for the given global
+ /// value which holds information about a type.
+ void setTypeVisibility(llvm::GlobalValue *GV, const CXXRecordDecl *D,
+ bool IsForRTTI) const;
+
+ llvm::Constant *GetAddrOfGlobal(GlobalDecl GD) {
+ if (isa<CXXConstructorDecl>(GD.getDecl()))
+ return GetAddrOfCXXConstructor(cast<CXXConstructorDecl>(GD.getDecl()),
+ GD.getCtorType());
+ else if (isa<CXXDestructorDecl>(GD.getDecl()))
+ return GetAddrOfCXXDestructor(cast<CXXDestructorDecl>(GD.getDecl()),
+ GD.getDtorType());
+ else if (isa<FunctionDecl>(GD.getDecl()))
+ return GetAddrOfFunction(GD);
+ else
+ return GetAddrOfGlobalVar(cast<VarDecl>(GD.getDecl()));
+ }
+
+ /// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
+ /// given global variable. If Ty is non-null and if the global doesn't exist,
+  /// then it will be created with the specified type instead of whatever the
+ /// normal requested type would be.
+ llvm::Constant *GetAddrOfGlobalVar(const VarDecl *D,
+ const llvm::Type *Ty = 0);
+
+ /// GetAddrOfFunction - Return the address of the given function. If Ty is
+ /// non-null, then this function will use the specified type if it has to
+ /// create it.
+ llvm::Constant *GetAddrOfFunction(GlobalDecl GD,
+ const llvm::Type *Ty = 0);
+
+ /// GetAddrOfRTTIDescriptor - Get the address of the RTTI descriptor
+ /// for the given type.
+ llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH = false);
+
+ /// GetAddrOfThunk - Get the address of the thunk for the given global decl.
+ llvm::Constant *GetAddrOfThunk(GlobalDecl GD, const ThunkInfo &Thunk);
+
+ /// GetWeakRefReference - Get a reference to the target of VD.
+ llvm::Constant *GetWeakRefReference(const ValueDecl *VD);
+
+ /// GetNonVirtualBaseClassOffset - Returns the offset from a derived class to
+  /// a base class along the given path. Returns null if the offset is 0.
+ llvm::Constant *
+ GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd);
+
+ /// GetStringForStringLiteral - Return the appropriate bytes for a string
+ /// literal, properly padded to match the literal type. If only the address of
+  /// a constant is needed, consider using GetAddrOfConstantStringFromLiteral.
+ std::string GetStringForStringLiteral(const StringLiteral *E);
+
+ /// GetAddrOfConstantCFString - Return a pointer to a constant CFString object
+ /// for the given string.
+ llvm::Constant *GetAddrOfConstantCFString(const StringLiteral *Literal);
+
+ /// GetAddrOfConstantNSString - Return a pointer to a constant NSString object
+ /// for the given string.
+ llvm::Constant *GetAddrOfConstantNSString(const StringLiteral *Literal);
+
+ /// GetAddrOfConstantStringFromLiteral - Return a pointer to a constant array
+ /// for the given string literal.
+ llvm::Constant *GetAddrOfConstantStringFromLiteral(const StringLiteral *S);
+
+ /// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
+ /// array for the given ObjCEncodeExpr node.
+ llvm::Constant *GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *);
+
+ /// GetAddrOfConstantString - Returns a pointer to a character array
+  /// containing the literal. The contents are exactly those of the given
+  /// string, i.e. it will not be null terminated automatically; see
+  /// GetAddrOfConstantCString. Note that whether the result is actually a
+  /// pointer to an LLVM constant depends on Features.WritableStrings.
+ ///
+ /// The result has pointer to array type.
+ ///
+ /// \param GlobalName If provided, the name to use for the global
+ /// (if one is created).
+ llvm::Constant *GetAddrOfConstantString(const std::string& str,
+ const char *GlobalName=0);
+
+ /// GetAddrOfConstantCString - Returns a pointer to a character array
+ /// containing the literal and a terminating '\0' character. The result has
+ /// pointer to array type.
+ ///
+ /// \param GlobalName If provided, the name to use for the global (if one is
+ /// created).
+ llvm::Constant *GetAddrOfConstantCString(const std::string &str,
+ const char *GlobalName=0);
+
+ /// GetAddrOfCXXConstructor - Return the address of the constructor of the
+ /// given type.
+ llvm::GlobalValue *GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
+ CXXCtorType Type);
+
+  /// GetAddrOfCXXDestructor - Return the address of the destructor of the
+ /// given type.
+ llvm::GlobalValue *GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType Type);
+
+ /// getBuiltinLibFunction - Given a builtin id for a function like
+ /// "__builtin_fabsf", return a Function* for "fabsf".
+ llvm::Value *getBuiltinLibFunction(const FunctionDecl *FD,
+ unsigned BuiltinID);
+
+ llvm::Function *getMemCpyFn(const llvm::Type *DestType,
+ const llvm::Type *SrcType,
+ const llvm::Type *SizeType);
+
+ llvm::Function *getMemMoveFn(const llvm::Type *DestType,
+ const llvm::Type *SrcType,
+ const llvm::Type *SizeType);
+
+ llvm::Function *getMemSetFn(const llvm::Type *DestType,
+ const llvm::Type *SizeType);
+
+ llvm::Function *getIntrinsic(unsigned IID, const llvm::Type **Tys = 0,
+ unsigned NumTys = 0);
+
+ /// EmitTopLevelDecl - Emit code for a single top level declaration.
+ void EmitTopLevelDecl(Decl *D);
+
+ /// AddUsedGlobal - Add a global which should be forced to be
+ /// present in the object file; these are emitted to the llvm.used
+ /// metadata global.
+ void AddUsedGlobal(llvm::GlobalValue *GV);
+
+ void AddAnnotation(llvm::Constant *C) { Annotations.push_back(C); }
+
+ /// AddCXXDtorEntry - Add a destructor and object to add to the C++ global
+ /// destructor function.
+ void AddCXXDtorEntry(llvm::Constant *DtorFn, llvm::Constant *Object) {
+ CXXGlobalDtors.push_back(std::make_pair(DtorFn, Object));
+ }
+
+ /// CreateRuntimeFunction - Create a new runtime function with the specified
+ /// type and name.
+ llvm::Constant *CreateRuntimeFunction(const llvm::FunctionType *Ty,
+ llvm::StringRef Name);
+ /// CreateRuntimeVariable - Create a new runtime global variable with the
+ /// specified type and name.
+ llvm::Constant *CreateRuntimeVariable(const llvm::Type *Ty,
+ llvm::StringRef Name);
+
+ ///@name Custom Blocks Runtime Interfaces
+ ///@{
+
+ llvm::Constant *getNSConcreteGlobalBlock();
+ llvm::Constant *getNSConcreteStackBlock();
+ llvm::Constant *getBlockObjectAssign();
+ llvm::Constant *getBlockObjectDispose();
+
+ ///@}
+
+ void UpdateCompletedType(const TagDecl *TD) {
+ // Make sure that this type is translated.
+ Types.UpdateCompletedType(TD);
+ }
+
+ /// EmitConstantExpr - Try to emit the given expression as a
+ /// constant; returns 0 if the expression cannot be emitted as a
+ /// constant.
+ llvm::Constant *EmitConstantExpr(const Expr *E, QualType DestType,
+ CodeGenFunction *CGF = 0);
+
+ /// EmitNullConstant - Return the result of value-initializing the given
+ /// type, i.e. a null expression of the given type. This is usually,
+ /// but not always, an LLVM null constant.
+ llvm::Constant *EmitNullConstant(QualType T);
+
+ llvm::Constant *EmitAnnotateAttr(llvm::GlobalValue *GV,
+ const AnnotateAttr *AA, unsigned LineNo);
+
+ /// ErrorUnsupported - Print out an error that codegen doesn't support the
+ /// specified stmt yet.
+ /// \param OmitOnError - If true, then this error should only be emitted if no
+ /// other errors have been reported.
+ void ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError=false);
+
+ /// ErrorUnsupported - Print out an error that codegen doesn't support the
+ /// specified decl yet.
+ /// \param OmitOnError - If true, then this error should only be emitted if no
+ /// other errors have been reported.
+ void ErrorUnsupported(const Decl *D, const char *Type,
+ bool OmitOnError=false);
+
+ /// SetInternalFunctionAttributes - Set the attributes on the LLVM
+ /// function for the given decl and function info. This applies
+ /// attributes necessary for handling the ABI as well as user
+ /// specified attributes like section.
+ void SetInternalFunctionAttributes(const Decl *D, llvm::Function *F,
+ const CGFunctionInfo &FI);
+
+ /// SetLLVMFunctionAttributes - Set the LLVM function attributes
+ /// (sext, zext, etc).
+ void SetLLVMFunctionAttributes(const Decl *D,
+ const CGFunctionInfo &Info,
+ llvm::Function *F);
+
+ /// SetLLVMFunctionAttributesForDefinition - Set the LLVM function attributes
+  /// which only apply to a function definition.
+ void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F);
+
+ /// ReturnTypeUsesSRet - Return true iff the given type uses 'sret' when used
+ /// as a return type.
+ bool ReturnTypeUsesSRet(const CGFunctionInfo &FI);
+
+  /// ReturnTypeUsesFPRet - Return true iff the given type uses 'fpret' when used
+ /// as a return type.
+ bool ReturnTypeUsesFPRet(QualType ResultType);
+
+ /// ConstructAttributeList - Get the LLVM attributes and calling convention to
+ /// use for a particular function type.
+ ///
+ /// \param Info - The function type information.
+ /// \param TargetDecl - The decl these attributes are being constructed
+ /// for. If supplied the attributes applied to this decl may contribute to the
+ /// function attributes and calling convention.
+ /// \param PAL [out] - On return, the attribute list to use.
+ /// \param CallingConv [out] - On return, the LLVM calling convention to use.
+ void ConstructAttributeList(const CGFunctionInfo &Info,
+ const Decl *TargetDecl,
+ AttributeListType &PAL,
+ unsigned &CallingConv);
+
+ llvm::StringRef getMangledName(GlobalDecl GD);
+ void getMangledName(GlobalDecl GD, MangleBuffer &Buffer, const BlockDecl *BD);
+
+ void EmitTentativeDefinition(const VarDecl *D);
+
+ void EmitVTable(CXXRecordDecl *Class, bool DefinitionRequired);
+
+ llvm::GlobalVariable::LinkageTypes
+ getFunctionLinkage(const FunctionDecl *FD);
+
+ void setFunctionLinkage(const FunctionDecl *FD, llvm::GlobalValue *V) {
+ V->setLinkage(getFunctionLinkage(FD));
+ }
+
+ /// getVTableLinkage - Return the appropriate linkage for the vtable, VTT,
+ /// and type information of the given class.
+ static llvm::GlobalVariable::LinkageTypes
+ getVTableLinkage(const CXXRecordDecl *RD);
+
+ /// GetTargetTypeStoreSize - Return the store size, in character units, of
+ /// the given LLVM type.
+ CharUnits GetTargetTypeStoreSize(const llvm::Type *Ty) const;
+
+ std::vector<const CXXRecordDecl*> DeferredVTables;
+
+private:
+ llvm::GlobalValue *GetGlobalValue(llvm::StringRef Ref);
+
+ llvm::Constant *GetOrCreateLLVMFunction(llvm::StringRef MangledName,
+ const llvm::Type *Ty,
+ GlobalDecl D);
+ llvm::Constant *GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
+ const llvm::PointerType *PTy,
+ const VarDecl *D);
+
+ /// SetCommonAttributes - Set attributes which are common to any
+ /// form of a global definition (alias, Objective-C method,
+ /// function, global variable).
+ ///
+ /// NOTE: This should only be called for definitions.
+ void SetCommonAttributes(const Decl *D, llvm::GlobalValue *GV);
+
+ /// SetFunctionDefinitionAttributes - Set attributes for a global definition.
+ void SetFunctionDefinitionAttributes(const FunctionDecl *D,
+ llvm::GlobalValue *GV);
+
+ /// SetFunctionAttributes - Set function attributes for a function
+ /// declaration.
+ void SetFunctionAttributes(GlobalDecl GD,
+ llvm::Function *F,
+ bool IsIncompleteFunction);
+
+  /// EmitGlobal - Emit code for a single global function or var decl. Forward
+ /// declarations are emitted lazily.
+ void EmitGlobal(GlobalDecl D);
+
+ void EmitGlobalDefinition(GlobalDecl D);
+
+ void EmitGlobalFunctionDefinition(GlobalDecl GD);
+ void EmitGlobalVarDefinition(const VarDecl *D);
+ void EmitAliasDefinition(GlobalDecl GD);
+ void EmitObjCPropertyImplementations(const ObjCImplementationDecl *D);
+ void EmitObjCIvarInitializations(ObjCImplementationDecl *D);
+
+ // C++ related functions.
+
+ bool TryEmitDefinitionAsAlias(GlobalDecl Alias, GlobalDecl Target);
+ bool TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D);
+
+ void EmitNamespace(const NamespaceDecl *D);
+ void EmitLinkageSpec(const LinkageSpecDecl *D);
+
+ /// EmitCXXConstructors - Emit constructors (base, complete) from a
+ /// C++ constructor Decl.
+ void EmitCXXConstructors(const CXXConstructorDecl *D);
+
+ /// EmitCXXConstructor - Emit a single constructor with the given type from
+ /// a C++ constructor Decl.
+ void EmitCXXConstructor(const CXXConstructorDecl *D, CXXCtorType Type);
+
+ /// EmitCXXDestructors - Emit destructors (base, complete) from a
+ /// C++ destructor Decl.
+ void EmitCXXDestructors(const CXXDestructorDecl *D);
+
+ /// EmitCXXDestructor - Emit a single destructor with the given type from
+ /// a C++ destructor Decl.
+ void EmitCXXDestructor(const CXXDestructorDecl *D, CXXDtorType Type);
+
+ /// EmitCXXGlobalInitFunc - Emit the function that initializes C++ globals.
+ void EmitCXXGlobalInitFunc();
+
+ /// EmitCXXGlobalDtorFunc - Emit the function that destroys C++ globals.
+ void EmitCXXGlobalDtorFunc();
+
+ void EmitCXXGlobalVarDeclInitFunc(const VarDecl *D);
+
+ // FIXME: Hardcoding priority here is gross.
+ void AddGlobalCtor(llvm::Function *Ctor, int Priority=65535);
+ void AddGlobalDtor(llvm::Function *Dtor, int Priority=65535);
+
+ /// EmitCtorList - Generates a global array of functions and priorities using
+ /// the given list and name. This array will have appending linkage and is
+  /// suitable for use as an LLVM constructor or destructor array.
+ void EmitCtorList(const CtorList &Fns, const char *GlobalName);
+
+ void EmitAnnotations(void);
+
+ /// EmitFundamentalRTTIDescriptor - Emit the RTTI descriptors for the
+ /// given type.
+ void EmitFundamentalRTTIDescriptor(QualType Type);
+
+ /// EmitFundamentalRTTIDescriptors - Emit the RTTI descriptors for the
+ /// builtin types.
+ void EmitFundamentalRTTIDescriptors();
+
+ /// EmitDeferred - Emit any needed decls for which code generation
+ /// was deferred.
+ void EmitDeferred(void);
+
+ /// EmitLLVMUsed - Emit the llvm.used metadata used to force
+ /// references to global which may otherwise be optimized out.
+ void EmitLLVMUsed(void);
+
+ void EmitDeclMetadata();
+
+ /// MayDeferGeneration - Determine if the given decl can be emitted
+ /// lazily; this is only relevant for definitions. The given decl
+ /// must be either a function or var decl.
+ bool MayDeferGeneration(const ValueDecl *D);
+};
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
new file mode 100644
index 0000000..5ab65c5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -0,0 +1,516 @@
+//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that handles AST -> LLVM type lowering.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenTypes.h"
+#include "CGCall.h"
+#include "CGCXXABI.h"
+#include "CGRecordLayout.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Module.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M,
+ const llvm::TargetData &TD, const ABIInfo &Info,
+ CGCXXABI &CXXABI)
+ : Context(Ctx), Target(Ctx.Target), TheModule(M), TheTargetData(TD),
+ TheABIInfo(Info), TheCXXABI(CXXABI) {
+}
+
+CodeGenTypes::~CodeGenTypes() {
+ for (llvm::DenseMap<const Type *, CGRecordLayout *>::iterator
+ I = CGRecordLayouts.begin(), E = CGRecordLayouts.end();
+ I != E; ++I)
+ delete I->second;
+
+ for (llvm::FoldingSet<CGFunctionInfo>::iterator
+ I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
+ delete &*I++;
+}
+
+/// HandleLateResolvedPointers - For top-level ConvertType calls, this handles
+/// pointers that are referenced but have not been converted yet. This is used
+/// to handle cyclic structures properly.
+void CodeGenTypes::HandleLateResolvedPointers() {
+ assert(!PointersToResolve.empty() && "No pointers to resolve!");
+
+ // Any pointers that were converted deferred evaluation of their pointee type,
+ // creating an opaque type instead. This is in order to avoid problems with
+  // circular types. Loop through all these deferred pointees, if any, and
+ // resolve them now.
+ while (!PointersToResolve.empty()) {
+ std::pair<QualType, llvm::OpaqueType*> P = PointersToResolve.pop_back_val();
+
+ // We can handle bare pointers here because we know that the only pointers
+ // to the Opaque type are P.second and from other types. Refining the
+  // opaque type away will invalidate P.second, but we don't mind :).
+ const llvm::Type *NT = ConvertTypeForMemRecursive(P.first);
+ P.second->refineAbstractTypeTo(NT);
+ }
+}
+
+
+/// ConvertType - Convert the specified type to its LLVM form.
+const llvm::Type *CodeGenTypes::ConvertType(QualType T, bool IsRecursive) {
+ const llvm::Type *Result = ConvertTypeRecursive(T);
+
+ // If this is a top-level call to ConvertType and sub-conversions caused
+ // pointers to get lazily built as opaque types, resolve the pointers, which
+ // might cause Result to be merged away.
+ if (!IsRecursive && !PointersToResolve.empty()) {
+ llvm::PATypeHolder ResultHandle = Result;
+ HandleLateResolvedPointers();
+ Result = ResultHandle;
+ }
+ return Result;
+}
+
+const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) {
+ T = Context.getCanonicalType(T);
+
+ // See if type is already cached.
+ llvm::DenseMap<Type *, llvm::PATypeHolder>::iterator
+ I = TypeCache.find(T.getTypePtr());
+  // If the type is found in the map and this is not a definition for an
+  // opaque placeholder type, then use it. Otherwise, convert type T.
+ if (I != TypeCache.end())
+ return I->second.get();
+
+ const llvm::Type *ResultType = ConvertNewType(T);
+ TypeCache.insert(std::make_pair(T.getTypePtr(),
+ llvm::PATypeHolder(ResultType)));
+ return ResultType;
+}
+
+/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
+/// ConvertType in that it is used to convert to the memory representation for
+/// a type. For example, the scalar representation for _Bool is i1, but the
+/// memory representation is usually i8 or i32, depending on the target.
+const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool IsRecursive){
+ const llvm::Type *R = ConvertType(T, IsRecursive);
+
+ // If this is a non-bool type, don't map it.
+ if (!R->isIntegerTy(1))
+ return R;
+
+ // Otherwise, return an integer of the target-specified size.
+ return llvm::IntegerType::get(getLLVMContext(),
+ (unsigned)Context.getTypeSize(T));
+
+}
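// Editor's note (illustrative, not part of this commit): the practical
// difference between the two entry points above shows up only for bool.
// On a typical target where _Bool occupies one byte:
//
//   ConvertType(BoolTy)       --> i1   (scalar form, used for SSA values)
//   ConvertTypeForMem(BoolTy) --> i8   (memory form, used for alloca/load/store)
//
// so a _Bool variable lives in an i8 slot and is truncated back to i1 at uses.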
+
+// Code to verify a given function type is complete, i.e. the return type
+// and all of the argument types are complete.
+const TagType *CodeGenTypes::VerifyFuncTypeComplete(const Type* T) {
+ const FunctionType *FT = cast<FunctionType>(T);
+ if (const TagType* TT = FT->getResultType()->getAs<TagType>())
+ if (!TT->getDecl()->isDefinition())
+ return TT;
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(T))
+ for (unsigned i = 0; i < FPT->getNumArgs(); i++)
+ if (const TagType* TT = FPT->getArgType(i)->getAs<TagType>())
+ if (!TT->getDecl()->isDefinition())
+ return TT;
+ return 0;
+}
+
+/// UpdateCompletedType - When we find the full definition for a TagDecl,
+/// replace the 'opaque' type we previously made for it if applicable.
+void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
+ const Type *Key = Context.getTagDeclType(TD).getTypePtr();
+ llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
+ TagDeclTypes.find(Key);
+ if (TDTI == TagDeclTypes.end()) return;
+
+ // Remember the opaque LLVM type for this tagdecl.
+ llvm::PATypeHolder OpaqueHolder = TDTI->second;
+ assert(isa<llvm::OpaqueType>(OpaqueHolder.get()) &&
+ "Updating compilation of an already non-opaque type?");
+
+ // Remove it from TagDeclTypes so that it will be regenerated.
+ TagDeclTypes.erase(TDTI);
+
+ // Generate the new type.
+ const llvm::Type *NT = ConvertTagDeclType(TD);
+
+ // Refine the old opaque type to its new definition.
+ cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NT);
+
+ // Since we just completed a tag type, check to see if any function types
+ // were completed along with the tag type.
+ // FIXME: This is very inefficient; if we track which function types depend
+ // on which tag types, though, it should be reasonably efficient.
+ llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator i;
+ for (i = FunctionTypes.begin(); i != FunctionTypes.end(); ++i) {
+ if (const TagType* TT = VerifyFuncTypeComplete(i->first)) {
+ // This function type still depends on an incomplete tag type; make sure
+ // that tag type has an associated opaque type.
+ ConvertTagDeclType(TT->getDecl());
+ } else {
+ // This function no longer depends on an incomplete tag type; create the
+ // function type, and refine the opaque type to the new function type.
+ llvm::PATypeHolder OpaqueHolder = i->second;
+ const llvm::Type *NFT = ConvertNewType(QualType(i->first, 0));
+ cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NFT);
+ FunctionTypes.erase(i);
+ }
+ }
+}
+
+static const llvm::Type* getTypeForFormat(llvm::LLVMContext &VMContext,
+ const llvm::fltSemantics &format) {
+ if (&format == &llvm::APFloat::IEEEsingle)
+ return llvm::Type::getFloatTy(VMContext);
+ if (&format == &llvm::APFloat::IEEEdouble)
+ return llvm::Type::getDoubleTy(VMContext);
+ if (&format == &llvm::APFloat::IEEEquad)
+ return llvm::Type::getFP128Ty(VMContext);
+ if (&format == &llvm::APFloat::PPCDoubleDouble)
+ return llvm::Type::getPPC_FP128Ty(VMContext);
+ if (&format == &llvm::APFloat::x87DoubleExtended)
+ return llvm::Type::getX86_FP80Ty(VMContext);
+ assert(0 && "Unknown float format!");
+ return 0;
+}
+
+const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
+ const clang::Type &Ty = *Context.getCanonicalType(T).getTypePtr();
+
+ switch (Ty.getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ assert(false && "Non-canonical or dependent types aren't possible.");
+ break;
+
+ case Type::Builtin: {
+ switch (cast<BuiltinType>(Ty).getKind()) {
+ case BuiltinType::Void:
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ // LLVM void type can only be used as the result of a function call. Just
+ // map to the same as char.
+ return llvm::Type::getInt8Ty(getLLVMContext());
+
+ case BuiltinType::Bool:
+ // Note that we always return bool as i1 for use as a scalar type.
+ return llvm::Type::getInt1Ty(getLLVMContext());
+
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ case BuiltinType::WChar:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ return llvm::IntegerType::get(getLLVMContext(),
+ static_cast<unsigned>(Context.getTypeSize(T)));
+
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ case BuiltinType::LongDouble:
+ return getTypeForFormat(getLLVMContext(),
+ Context.getFloatTypeSemantics(T));
+
+ case BuiltinType::NullPtr: {
+ // Model std::nullptr_t as i8*
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext());
+ return llvm::PointerType::getUnqual(Ty);
+ }
+
+ case BuiltinType::UInt128:
+ case BuiltinType::Int128:
+ return llvm::IntegerType::get(getLLVMContext(), 128);
+
+ case BuiltinType::Overload:
+ case BuiltinType::Dependent:
+ case BuiltinType::UndeducedAuto:
+ assert(0 && "Unexpected builtin type!");
+ break;
+ }
+ assert(0 && "Unknown builtin type!");
+ break;
+ }
+ case Type::Complex: {
+ const llvm::Type *EltTy =
+ ConvertTypeRecursive(cast<ComplexType>(Ty).getElementType());
+ return llvm::StructType::get(TheModule.getContext(), EltTy, EltTy, NULL);
+ }
+ case Type::LValueReference:
+ case Type::RValueReference: {
+ const ReferenceType &RTy = cast<ReferenceType>(Ty);
+ QualType ETy = RTy.getPointeeType();
+ llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
+ PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
+ return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
+ }
+ case Type::Pointer: {
+ const PointerType &PTy = cast<PointerType>(Ty);
+ QualType ETy = PTy.getPointeeType();
+ llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
+ PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
+ return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
+ }
+
+ case Type::VariableArray: {
+ const VariableArrayType &A = cast<VariableArrayType>(Ty);
+ assert(A.getIndexTypeCVRQualifiers() == 0 &&
+ "FIXME: We only handle trivial array types so far!");
+ // VLAs resolve to the innermost element type; this matches
+ // the return of alloca, and there isn't any obviously better choice.
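+ // For example, 'int A[n]' lowers to plain i32 here; the dynamic element
+ // count is tracked separately by the code that allocates the VLA.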
+ return ConvertTypeForMemRecursive(A.getElementType());
+ }
+ case Type::IncompleteArray: {
+ const IncompleteArrayType &A = cast<IncompleteArrayType>(Ty);
+ assert(A.getIndexTypeCVRQualifiers() == 0 &&
+ "FIXME: We only handle trivial array types so far!");
+ // int X[] -> [0 x int]
+ return llvm::ArrayType::get(ConvertTypeForMemRecursive(A.getElementType()),
+ 0);
+ }
+ case Type::ConstantArray: {
+ const ConstantArrayType &A = cast<ConstantArrayType>(Ty);
+ const llvm::Type *EltTy = ConvertTypeForMemRecursive(A.getElementType());
+ return llvm::ArrayType::get(EltTy, A.getSize().getZExtValue());
+ }
+ case Type::ExtVector:
+ case Type::Vector: {
+ const VectorType &VT = cast<VectorType>(Ty);
+ return llvm::VectorType::get(ConvertTypeRecursive(VT.getElementType()),
+ VT.getNumElements());
+ }
+ case Type::FunctionNoProto:
+ case Type::FunctionProto: {
+ // First, check whether we can build the full function type. If the
+ // function type depends on an incomplete type (e.g. a struct or enum), we
+ // cannot lower the function type. Instead, turn it into an Opaque pointer
+ // and have UpdateCompletedType revisit the function type when/if the opaque
+ // argument type is defined.
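+ // For example, 'void f(struct S);' cannot be lowered while 'struct S' is
+ // still only a forward declaration.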
+ if (const TagType *TT = VerifyFuncTypeComplete(&Ty)) {
+ // This function's type depends on an incomplete tag type; make sure
+ // we have an opaque type corresponding to the tag type.
+ ConvertTagDeclType(TT->getDecl());
+ // Create an opaque type for this function type, save it, and return it.
+ llvm::Type *ResultType = llvm::OpaqueType::get(getLLVMContext());
+ FunctionTypes.insert(std::make_pair(&Ty, ResultType));
+ return ResultType;
+ }
+
+ // The function type can be built; call the appropriate routines to
+ // build it.
+ const CGFunctionInfo *FI;
+ bool isVariadic;
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty)) {
+ FI = &getFunctionInfo(
+ CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)),
+ true /*Recursive*/);
+ isVariadic = FPT->isVariadic();
+ } else {
+ const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty);
+ FI = &getFunctionInfo(
+ CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)),
+ true /*Recursive*/);
+ isVariadic = true;
+ }
+
+ return GetFunctionType(*FI, isVariadic, true);
+ }
+
+ case Type::ObjCObject:
+ return ConvertTypeRecursive(cast<ObjCObjectType>(Ty).getBaseType());
+
+ case Type::ObjCInterface: {
+ // Objective-C interfaces are always opaque (outside of the
+ // runtime, which can do whatever it likes); we never refine
+ // these.
+ const llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(&Ty)];
+ if (!T)
+ T = llvm::OpaqueType::get(getLLVMContext());
+ return T;
+ }
+
+ case Type::ObjCObjectPointer: {
+ // Protocol qualifications do not influence the LLVM type; we just return a
+ // pointer to the underlying interface type. We don't need to worry about
+ // recursive conversion.
+ const llvm::Type *T =
+ ConvertTypeRecursive(cast<ObjCObjectPointerType>(Ty).getPointeeType());
+ return llvm::PointerType::getUnqual(T);
+ }
+
+ case Type::Record:
+ case Type::Enum: {
+ const TagDecl *TD = cast<TagType>(Ty).getDecl();
+ const llvm::Type *Res = ConvertTagDeclType(TD);
+
+ std::string TypeName(TD->getKindName());
+ TypeName += '.';
+
+ // Name the codegen type after the typedef name
+ // if there is no tag type name available
+ if (TD->getIdentifier())
+ // FIXME: We should not have to check for a null decl context here.
+ // Right now we do it because the implicit Obj-C decls don't have one.
+ TypeName += TD->getDeclContext() ? TD->getQualifiedNameAsString() :
+ TD->getNameAsString();
+ else if (const TypedefType *TdT = dyn_cast<TypedefType>(T))
+ // FIXME: We should not have to check for a null decl context here.
+ // Right now we do it because the implicit Obj-C decls don't have one.
+ TypeName += TdT->getDecl()->getDeclContext() ?
+ TdT->getDecl()->getQualifiedNameAsString() :
+ TdT->getDecl()->getNameAsString();
+ else
+ TypeName += "anon";
+
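+ // For example, a definition of 'struct Foo' is registered in the module
+ // under the name 'struct.Foo'; an unnamed struct falls back to 'struct.anon'.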
+ TheModule.addTypeName(TypeName, Res);
+ return Res;
+ }
+
+ case Type::BlockPointer: {
+ const QualType FTy = cast<BlockPointerType>(Ty).getPointeeType();
+ llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
+ PointersToResolve.push_back(std::make_pair(FTy, PointeeType));
+ return llvm::PointerType::get(PointeeType, FTy.getAddressSpace());
+ }
+
+ case Type::MemberPointer: {
+ return getCXXABI().ConvertMemberPointerType(cast<MemberPointerType>(&Ty));
+ }
+ }
+
+ // FIXME: implement.
+ return llvm::OpaqueType::get(getLLVMContext());
+}
+
+/// ConvertTagDeclType - Lay out a tagged decl type such as a struct, union,
+/// or enum.
+const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
+ // TagDecl's are not necessarily unique, instead use the (clang)
+ // type connected to the decl.
+ const Type *Key =
+ Context.getTagDeclType(TD).getTypePtr();
+ llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
+ TagDeclTypes.find(Key);
+
+ // If we've already compiled this tag type, use the previous definition.
+ if (TDTI != TagDeclTypes.end())
+ return TDTI->second;
+
+ // If this is still a forward declaration, just define an opaque
+ // type to use for this tagged decl.
+ if (!TD->isDefinition()) {
+ llvm::Type *ResultType = llvm::OpaqueType::get(getLLVMContext());
+ TagDeclTypes.insert(std::make_pair(Key, ResultType));
+ return ResultType;
+ }
+
+ // Okay, this is a definition of a type. Compile the implementation now.
+
+ if (TD->isEnum()) // Don't bother storing enums in TagDeclTypes.
+ return ConvertTypeRecursive(cast<EnumDecl>(TD)->getIntegerType());
+
+ // This decl could well be recursive. In this case, insert an opaque
+ // definition of this type, which the recursive uses will get. We will then
+ // refine this opaque version later.
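+ // For example, 'struct Node { struct Node *next; };' needs the opaque type
+ // so the recursive reference through 'next' can be converted before Node
+ // itself is complete.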
+
+ // Create new OpaqueType now for later use in case this is a recursive
+ // type. This will later be refined to the actual type.
+ llvm::PATypeHolder ResultHolder = llvm::OpaqueType::get(getLLVMContext());
+ TagDeclTypes.insert(std::make_pair(Key, ResultHolder));
+
+ const RecordDecl *RD = cast<const RecordDecl>(TD);
+
+ // Force conversion of non-virtual base classes recursively.
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(TD)) {
+ for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
+ e = RD->bases_end(); i != e; ++i) {
+ if (!i->isVirtual()) {
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+ ConvertTagDeclType(Base);
+ }
+ }
+ }
+
+ // Layout fields.
+ CGRecordLayout *Layout = ComputeRecordLayout(RD);
+
+ CGRecordLayouts[Key] = Layout;
+ const llvm::Type *ResultType = Layout->getLLVMType();
+
+ // Refine our Opaque type to ResultType. This can invalidate ResultType, so
+ // make sure to read the result out of the holder.
+ cast<llvm::OpaqueType>(ResultHolder.get())
+ ->refineAbstractTypeTo(ResultType);
+
+ return ResultHolder.get();
+}
+
+/// getCGRecordLayout - Return record layout info for the given record decl.
+const CGRecordLayout &
+CodeGenTypes::getCGRecordLayout(const RecordDecl *TD) const {
+ const Type *Key = Context.getTagDeclType(TD).getTypePtr();
+ const CGRecordLayout *Layout = CGRecordLayouts.lookup(Key);
+ assert(Layout && "Unable to find record layout information for type");
+ return *Layout;
+}
+
+bool CodeGenTypes::isZeroInitializable(QualType T) {
+ // No need to check for member pointers when not compiling C++.
+ if (!Context.getLangOptions().CPlusPlus)
+ return true;
+
+ T = Context.getBaseElementType(T);
+
+ // Records are non-zero-initializable if they contain any
+ // non-zero-initializable subobjects.
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ return isZeroInitializable(RD);
+ }
+
+ // We have to ask the ABI about member pointers.
+ if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
+ return getCXXABI().isZeroInitializable(MPT);
+
+ // Everything else is okay.
+ return true;
+}
+
+bool CodeGenTypes::isZeroInitializable(const CXXRecordDecl *RD) {
+
+ // FIXME: It would be better if there was a way to explicitly compute the
+ // record layout instead of converting to a type.
+ ConvertTagDeclType(RD);
+
+ const CGRecordLayout &Layout = getCGRecordLayout(RD);
+ return Layout.isZeroInitializable();
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
new file mode 100644
index 0000000..1fc2153
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
@@ -0,0 +1,224 @@
+//===--- CodeGenTypes.h - Type translation for LLVM CodeGen -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that handles AST -> LLVM type lowering.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENTYPES_H
+#define CLANG_CODEGEN_CODEGENTYPES_H
+
+#include "CGCall.h"
+#include "GlobalDecl.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/DenseMap.h"
+#include <vector>
+
+namespace llvm {
+ class FunctionType;
+ class Module;
+ class OpaqueType;
+ class PATypeHolder;
+ class TargetData;
+ class Type;
+ class LLVMContext;
+}
+
+namespace clang {
+ class ABIInfo;
+ class ASTContext;
+ template <typename> class CanQual;
+ class CXXConstructorDecl;
+ class CXXDestructorDecl;
+ class CXXMethodDecl;
+ class FieldDecl;
+ class FunctionProtoType;
+ class ObjCInterfaceDecl;
+ class ObjCIvarDecl;
+ class PointerType;
+ class QualType;
+ class RecordDecl;
+ class TagDecl;
+ class TargetInfo;
+ class Type;
+ typedef CanQual<Type> CanQualType;
+
+namespace CodeGen {
+ class CGCXXABI;
+ class CGRecordLayout;
+
+/// CodeGenTypes - This class organizes the cross-module state that is used
+/// while lowering AST types to LLVM types.
+class CodeGenTypes {
+ ASTContext &Context;
+ const TargetInfo &Target;
+ llvm::Module& TheModule;
+ const llvm::TargetData& TheTargetData;
+ const ABIInfo& TheABIInfo;
+ CGCXXABI &TheCXXABI;
+
+ llvm::SmallVector<std::pair<QualType,
+ llvm::OpaqueType *>, 8> PointersToResolve;
+
+ llvm::DenseMap<const Type*, llvm::PATypeHolder> TagDeclTypes;
+
+ llvm::DenseMap<const Type*, llvm::PATypeHolder> FunctionTypes;
+
+ /// The opaque type map for Objective-C interfaces. All direct
+ /// manipulation is done by the runtime interfaces, which are
+ /// responsible for coercing to the appropriate type; these opaque
+ /// types are never refined.
+ llvm::DenseMap<const ObjCInterfaceType*, const llvm::Type *> InterfaceTypes;
+
+ /// CGRecordLayouts - This maps clang record types to their corresponding
+ /// record layout info.
+ llvm::DenseMap<const Type*, CGRecordLayout *> CGRecordLayouts;
+
+ /// FunctionInfos - Hold memoized CGFunctionInfo results.
+ llvm::FoldingSet<CGFunctionInfo> FunctionInfos;
+
+private:
+ /// TypeCache - This map caches converted llvm::Types (through PATypeHolder),
+ /// keyed by the corresponding clang::Type. llvm::PATypeHolder is used instead
+ /// of llvm::Type because it allows us to bypass potential dangling type
+ /// pointers due to type refinement on the LLVM side.
+ llvm::DenseMap<Type *, llvm::PATypeHolder> TypeCache;
+
+ /// ConvertNewType - Convert type T into a llvm::Type. Do not use this
+ /// method directly because it does not do any type caching. This method
+ /// is available only for ConvertType(); ConvertType() is the preferred
+ /// interface for converting type T into a llvm::Type.
+ const llvm::Type *ConvertNewType(QualType T);
+
+ /// HandleLateResolvedPointers - For top-level ConvertType calls, this handles
+ /// pointers that are referenced but have not been converted yet. This is
+ /// used to handle cyclic structures properly.
+ void HandleLateResolvedPointers();
+
+public:
+ CodeGenTypes(ASTContext &Ctx, llvm::Module &M, const llvm::TargetData &TD,
+ const ABIInfo &Info, CGCXXABI &CXXABI);
+ ~CodeGenTypes();
+
+ const llvm::TargetData &getTargetData() const { return TheTargetData; }
+ const TargetInfo &getTarget() const { return Target; }
+ ASTContext &getContext() const { return Context; }
+ const ABIInfo &getABIInfo() const { return TheABIInfo; }
+ CGCXXABI &getCXXABI() const { return TheCXXABI; }
+ llvm::LLVMContext &getLLVMContext() { return TheModule.getContext(); }
+
+ /// ConvertType - Convert type T into a llvm::Type.
+ const llvm::Type *ConvertType(QualType T, bool IsRecursive = false);
+ const llvm::Type *ConvertTypeRecursive(QualType T);
+
+ /// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
+ /// ConvertType in that it is used to convert to the memory representation for
+ /// a type. For example, the scalar representation for _Bool is i1, but the
+ /// memory representation is usually i8 or i32, depending on the target.
+ const llvm::Type *ConvertTypeForMem(QualType T, bool IsRecursive = false);
+ const llvm::Type *ConvertTypeForMemRecursive(QualType T) {
+ return ConvertTypeForMem(T, true);
+ }
+
+ /// GetFunctionType - Get the LLVM function type for \arg Info.
+ const llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info,
+ bool IsVariadic,
+ bool IsRecursive = false);
+
+ const llvm::FunctionType *GetFunctionType(GlobalDecl GD);
+
+ /// VerifyFuncTypeComplete - Utility to check whether a function type can
+ /// be converted to an LLVM type (i.e. doesn't depend on an incomplete tag
+ /// type).
+ static const TagType *VerifyFuncTypeComplete(const Type* T);
+
+ /// GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable,
+ /// given a CXXMethodDecl. If the method has an incomplete return type and/or
+ /// incomplete argument types, this will return an opaque type.
+ const llvm::Type *GetFunctionTypeForVTable(GlobalDecl GD);
+
+ const CGRecordLayout &getCGRecordLayout(const RecordDecl*) const;
+
+ /// UpdateCompletedType - When we find the full definition for a TagDecl,
+ /// replace the 'opaque' type we previously made for it if applicable.
+ void UpdateCompletedType(const TagDecl *TD);
+
+ /// getFunctionInfo - Get the function info for the specified function decl.
+ const CGFunctionInfo &getFunctionInfo(GlobalDecl GD);
+
+ const CGFunctionInfo &getFunctionInfo(const FunctionDecl *FD);
+ const CGFunctionInfo &getFunctionInfo(const CXXMethodDecl *MD);
+ const CGFunctionInfo &getFunctionInfo(const ObjCMethodDecl *MD);
+ const CGFunctionInfo &getFunctionInfo(const CXXConstructorDecl *D,
+ CXXCtorType Type);
+ const CGFunctionInfo &getFunctionInfo(const CXXDestructorDecl *D,
+ CXXDtorType Type);
+
+ const CGFunctionInfo &getFunctionInfo(const CallArgList &Args,
+ const FunctionType *Ty) {
+ return getFunctionInfo(Ty->getResultType(), Args,
+ Ty->getExtInfo());
+ }
+
+ const CGFunctionInfo &getFunctionInfo(CanQual<FunctionProtoType> Ty,
+ bool IsRecursive = false);
+ const CGFunctionInfo &getFunctionInfo(CanQual<FunctionNoProtoType> Ty,
+ bool IsRecursive = false);
+
+ /// getFunctionInfo - Get the function info for a member function of
+ /// the given type. This is used for calls through member function
+ /// pointers.
+ const CGFunctionInfo &getFunctionInfo(const CXXRecordDecl *RD,
+ const FunctionProtoType *FTP);
+
+ /// getFunctionInfo - Get the function info for a function described by a
+ /// return type and argument types. If the calling convention is not
+ /// specified, the "C" calling convention will be used.
+ const CGFunctionInfo &getFunctionInfo(QualType ResTy,
+ const CallArgList &Args,
+ const FunctionType::ExtInfo &Info);
+ const CGFunctionInfo &getFunctionInfo(QualType ResTy,
+ const FunctionArgList &Args,
+ const FunctionType::ExtInfo &Info);
+
+ /// Retrieves the ABI information for the given function signature.
+ ///
+ /// \param ArgTys - must all actually be canonical as params
+ const CGFunctionInfo &getFunctionInfo(CanQualType RetTy,
+ const llvm::SmallVectorImpl<CanQualType> &ArgTys,
+ const FunctionType::ExtInfo &Info,
+ bool IsRecursive = false);
+
+ /// \brief Compute a new LLVM record layout object for the given record.
+ CGRecordLayout *ComputeRecordLayout(const RecordDecl *D);
+
+public: // These are internal details of CGT that shouldn't be used externally.
+ /// ConvertTagDeclType - Lay out a tagged decl type such as a struct, union,
+ /// or enum.
+ const llvm::Type *ConvertTagDeclType(const TagDecl *TD);
+
+ /// GetExpandedTypes - Expand the type \arg Ty into the LLVM argument types
+ /// it would be passed as, appending them to the provided vector \arg ArgTys.
+ /// See ABIArgInfo::Expand.
+ void GetExpandedTypes(QualType Ty, std::vector<const llvm::Type*> &ArgTys,
+ bool IsRecursive);
+
+ /// isZeroInitializable - Return whether a type can be
+ /// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
+ bool isZeroInitializable(QualType T);
+
+ /// isZeroInitializable - Return whether a record type can be
+ /// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
+ bool isZeroInitializable(const CXXRecordDecl *RD);
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h b/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h
new file mode 100644
index 0000000..26dea40
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h
@@ -0,0 +1,121 @@
+//===--- GlobalDecl.h - Global declaration holder ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A GlobalDecl can hold either a regular variable/function or a C++ ctor/dtor
+// together with its type.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_GLOBALDECL_H
+#define CLANG_CODEGEN_GLOBALDECL_H
+
+#include "CGCXX.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+
+namespace clang {
+
+namespace CodeGen {
+
+/// GlobalDecl - represents a global declaration. This can either be a
+/// CXXConstructorDecl and the constructor type (Base, Complete), a
+/// CXXDestructorDecl and the destructor type (Base, Complete), or a VarDecl,
+/// a FunctionDecl, a BlockDecl or an ObjCMethodDecl.
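+/// For example, the complete-object constructor of a class is referenced as
+/// GlobalDecl(CD, Ctor_Complete), while a plain function is just GlobalDecl(FD).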
+class GlobalDecl {
+ llvm::PointerIntPair<const Decl*, 2> Value;
+
+ void Init(const Decl *D) {
+ assert(!isa<CXXConstructorDecl>(D) && "Use other ctor with ctor decls!");
+ assert(!isa<CXXDestructorDecl>(D) && "Use other ctor with dtor decls!");
+
+ Value.setPointer(D);
+ }
+
+public:
+ GlobalDecl() {}
+
+ GlobalDecl(const VarDecl *D) { Init(D);}
+ GlobalDecl(const FunctionDecl *D) { Init(D); }
+ GlobalDecl(const BlockDecl *D) { Init(D); }
+ GlobalDecl(const ObjCMethodDecl *D) { Init(D); }
+
+ GlobalDecl(const CXXConstructorDecl *D, CXXCtorType Type)
+ : Value(D, Type) {}
+ GlobalDecl(const CXXDestructorDecl *D, CXXDtorType Type)
+ : Value(D, Type) {}
+
+ GlobalDecl getCanonicalDecl() const {
+ GlobalDecl CanonGD;
+ CanonGD.Value.setPointer(Value.getPointer()->getCanonicalDecl());
+ CanonGD.Value.setInt(Value.getInt());
+
+ return CanonGD;
+ }
+
+ const Decl *getDecl() const { return Value.getPointer(); }
+
+ CXXCtorType getCtorType() const {
+ assert(isa<CXXConstructorDecl>(getDecl()) && "Decl is not a ctor!");
+ return static_cast<CXXCtorType>(Value.getInt());
+ }
+
+ CXXDtorType getDtorType() const {
+ assert(isa<CXXDestructorDecl>(getDecl()) && "Decl is not a dtor!");
+ return static_cast<CXXDtorType>(Value.getInt());
+ }
+
+ friend bool operator==(const GlobalDecl &LHS, const GlobalDecl &RHS) {
+ return LHS.Value == RHS.Value;
+ }
+
+ void *getAsOpaquePtr() const { return Value.getOpaqueValue(); }
+
+ static GlobalDecl getFromOpaquePtr(void *P) {
+ GlobalDecl GD;
+ GD.Value.setFromOpaqueValue(P);
+ return GD;
+ }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+namespace llvm {
+ template<class> struct DenseMapInfo;
+
+ template<> struct DenseMapInfo<clang::CodeGen::GlobalDecl> {
+ static inline clang::CodeGen::GlobalDecl getEmptyKey() {
+ return clang::CodeGen::GlobalDecl();
+ }
+
+ static inline clang::CodeGen::GlobalDecl getTombstoneKey() {
+ return clang::CodeGen::GlobalDecl::
+ getFromOpaquePtr(reinterpret_cast<void*>(-1));
+ }
+
+ static unsigned getHashValue(clang::CodeGen::GlobalDecl GD) {
+ return DenseMapInfo<void*>::getHashValue(GD.getAsOpaquePtr());
+ }
+
+ static bool isEqual(clang::CodeGen::GlobalDecl LHS,
+ clang::CodeGen::GlobalDecl RHS) {
+ return LHS == RHS;
+ }
+
+ };
+
+ // GlobalDecl isn't *technically* a POD type. However, its copy constructor,
+ // copy assignment operator, and destructor are all trivial.
+ template <>
+ struct isPodLike<clang::CodeGen::GlobalDecl> {
+ static const bool value = true;
+ };
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp
new file mode 100644
index 0000000..eefc530
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -0,0 +1,1023 @@
+//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ code generation targeting the Itanium C++ ABI. The class
+// in this file generates structures that follow the Itanium C++ ABI, which is
+// documented at:
+// http://www.codesourcery.com/public/cxx-abi/abi.html
+// http://www.codesourcery.com/public/cxx-abi/abi-eh.html
+//
+// It also supports the closely-related ARM ABI, documented at:
+// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCXXABI.h"
+#include "CGRecordLayout.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "Mangle.h"
+#include <clang/AST/Type.h>
+#include <llvm/Target/TargetData.h>
+#include <llvm/Value.h>
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+class ItaniumCXXABI : public CodeGen::CGCXXABI {
+private:
+ const llvm::IntegerType *PtrDiffTy;
+protected:
+ CodeGen::MangleContext MangleCtx;
+ bool IsARM;
+
+ // It's a little silly for us to cache this.
+ const llvm::IntegerType *getPtrDiffTy() {
+ if (!PtrDiffTy) {
+ QualType T = getContext().getPointerDiffType();
+ const llvm::Type *Ty = CGM.getTypes().ConvertTypeRecursive(T);
+ PtrDiffTy = cast<llvm::IntegerType>(Ty);
+ }
+ return PtrDiffTy;
+ }
+
+ bool NeedsArrayCookie(QualType ElementType);
+
+public:
+ ItaniumCXXABI(CodeGen::CodeGenModule &CGM, bool IsARM = false) :
+ CGCXXABI(CGM), PtrDiffTy(0), MangleCtx(getContext(), CGM.getDiags()),
+ IsARM(IsARM) { }
+
+ CodeGen::MangleContext &getMangleContext() {
+ return MangleCtx;
+ }
+
+ bool isZeroInitializable(const MemberPointerType *MPT);
+
+ const llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT);
+
+ llvm::Value *EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+ llvm::Value *&This,
+ llvm::Value *MemFnPtr,
+ const MemberPointerType *MPT);
+
+ llvm::Value *EmitMemberDataPointerAddress(CodeGenFunction &CGF,
+ llvm::Value *Base,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
+
+ llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *Src);
+
+ llvm::Constant *EmitMemberPointerConversion(llvm::Constant *C,
+ const CastExpr *E);
+
+ llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);
+
+ llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD);
+ llvm::Constant *EmitMemberPointer(const FieldDecl *FD);
+
+ llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality);
+
+ llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *Addr,
+ const MemberPointerType *MPT);
+
+ void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType T,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys);
+
+ void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
+ CXXDtorType T,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys);
+
+ void BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params);
+
+ void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
+
+ CharUnits GetArrayCookieSize(QualType ElementType);
+ llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ QualType ElementType);
+ void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ QualType ElementType, llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize);
+};
+
+class ARMCXXABI : public ItaniumCXXABI {
+public:
+ ARMCXXABI(CodeGen::CodeGenModule &CGM) : ItaniumCXXABI(CGM, /*ARM*/ true) {}
+
+ void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType T,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys);
+
+ void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
+ CXXDtorType T,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys);
+
+ void BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params);
+
+ void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
+
+ void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV, QualType ResTy);
+
+ CharUnits GetArrayCookieSize(QualType ElementType);
+ llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ QualType ElementType);
+ void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ QualType ElementType, llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize);
+
+private:
+ /// \brief Returns true if the given instance method is one of the
+ /// kinds that the ARM ABI says returns 'this'.
+ static bool HasThisReturn(GlobalDecl GD) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ return ((isa<CXXDestructorDecl>(MD) && GD.getDtorType() != Dtor_Deleting) ||
+ (isa<CXXConstructorDecl>(MD)));
+ }
+};
+}
+
+CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
+ return new ItaniumCXXABI(CGM);
+}
+
+CodeGen::CGCXXABI *CodeGen::CreateARMCXXABI(CodeGenModule &CGM) {
+ return new ARMCXXABI(CGM);
+}
+
+const llvm::Type *
+ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
+ if (MPT->isMemberDataPointer())
+ return getPtrDiffTy();
+ else
+ return llvm::StructType::get(CGM.getLLVMContext(),
+ getPtrDiffTy(), getPtrDiffTy(), NULL);
+}
+
+/// In the Itanium and ARM ABIs, method pointers have the form:
+/// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
+///
+/// In the Itanium ABI:
+/// - method pointers are virtual if (memptr.ptr & 1) is nonzero
+/// - the this-adjustment is (memptr.adj)
+/// - the virtual offset is (memptr.ptr - 1)
+///
+/// In the ARM ABI:
+/// - method pointers are virtual if (memptr.adj & 1) is nonzero
+/// - the this-adjustment is (memptr.adj >> 1)
+/// - the virtual offset is (memptr.ptr)
+/// ARM uses 'adj' for the virtual flag because Thumb functions
+/// may be only single-byte aligned.
+///
+/// If the member is virtual, the adjusted 'this' pointer points
+/// to a vtable pointer from which the virtual offset is applied.
+///
+/// If the member is non-virtual, memptr.ptr is the address of
+/// the function to call.
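+///
+/// For example, assuming no this-adjustment, a virtual member function at
+/// vtable offset 8 is encoded as { ptr = 9, adj = 0 } under Itanium and as
+/// { ptr = 8, adj = 1 } under ARM.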
+llvm::Value *
+ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+ llvm::Value *&This,
+ llvm::Value *MemFnPtr,
+ const MemberPointerType *MPT) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->getAs<FunctionProtoType>();
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
+
+ const llvm::FunctionType *FTy =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
+ FPT->isVariadic());
+
+ const llvm::IntegerType *ptrdiff = getPtrDiffTy();
+ llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(ptrdiff, 1);
+
+ llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
+ llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
+ llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
+
+ // Extract memptr.adj, which is in the second field.
+ llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
+
+ // Compute the true adjustment.
+ llvm::Value *Adj = RawAdj;
+ if (IsARM)
+ Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
+
+ // Apply the adjustment and cast back to the original struct type
+ // for consistency.
+ llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
+ Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
+ This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
+
+ // Load the function pointer.
+ llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
+
+ // If the LSB in the function pointer is 1, the function pointer points to
+ // a virtual function.
+ llvm::Value *IsVirtual;
+ if (IsARM)
+ IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
+ else
+ IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
+ IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
+ Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
+
+ // In the virtual path, the adjustment left 'This' pointing to the
+ // vtable of the correct base subobject. The "function pointer" is an
+ // offset within the vtable (+1 for the virtual flag on non-ARM).
+ CGF.EmitBlock(FnVirtual);
+
+ // Cast the adjusted this to a pointer to vtable pointer and load.
+ const llvm::Type *VTableTy = Builder.getInt8PtrTy();
+ llvm::Value *VTable = Builder.CreateBitCast(This, VTableTy->getPointerTo());
+ VTable = Builder.CreateLoad(VTable, "memptr.vtable");
+
+ // Apply the offset.
+ llvm::Value *VTableOffset = FnAsInt;
+ if (!IsARM) VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
+ VTable = Builder.CreateGEP(VTable, VTableOffset);
+
+ // Load the virtual function to call.
+ VTable = Builder.CreateBitCast(VTable, FTy->getPointerTo()->getPointerTo());
+ llvm::Value *VirtualFn = Builder.CreateLoad(VTable, "memptr.virtualfn");
+ CGF.EmitBranch(FnEnd);
+
+ // In the non-virtual path, the member pointer's 'ptr' field already holds
+ // the address of the function to call; just cast it to a function pointer.
+ CGF.EmitBlock(FnNonVirtual);
+ llvm::Value *NonVirtualFn =
+ Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
+
+ // We're done.
+ CGF.EmitBlock(FnEnd);
+ llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo());
+ Callee->reserveOperandSpace(2);
+ Callee->addIncoming(VirtualFn, FnVirtual);
+ Callee->addIncoming(NonVirtualFn, FnNonVirtual);
+ return Callee;
+}
+
+/// Compute an l-value by applying the given pointer-to-member to a
+/// base object.
+llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
+ llvm::Value *Base,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ assert(MemPtr->getType() == getPtrDiffTy());
+
+ CGBuilderTy &Builder = CGF.Builder;
+
+ unsigned AS = cast<llvm::PointerType>(Base->getType())->getAddressSpace();
+
+ // Cast to char*.
+ Base = Builder.CreateBitCast(Base, Builder.getInt8Ty()->getPointerTo(AS));
+
+ // Apply the offset, which we assume is non-null.
+ llvm::Value *Addr = Builder.CreateInBoundsGEP(Base, MemPtr, "memptr.offset");
+
+ // Cast the address to the appropriate pointer type, adopting the
+ // address space of the base pointer.
+ const llvm::Type *PType
+ = CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS);
+ return Builder.CreateBitCast(Addr, PType);
+}
+
+/// Perform a derived-to-base or base-to-derived member pointer conversion.
+///
+/// Obligatory offset/adjustment diagram:
+/// <-- offset --> <-- adjustment -->
+/// |--------------------------|----------------------|--------------------|
+/// ^Derived address point ^Base address point ^Member address point
+///
+/// So when converting a base member pointer to a derived member pointer,
+/// we add the offset to the adjustment because the address point has
+/// decreased; and conversely, when converting a derived MP to a base MP
+/// we subtract the offset from the adjustment because the address point
+/// has increased.
+///
+/// The standard forbids (at compile time) conversion to and from
+/// virtual bases, which is why we don't have to consider them here.
+///
+/// The standard forbids (at run time) casting a derived MP to a base
+/// MP when the derived MP does not point to a member of the base.
+/// This is why -1 is a reasonable choice for null data member
+/// pointers.
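+///
+/// For example, a data member pointer holding offset 4 into a base class that
+/// is laid out at offset 8 within the derived class converts to offset 12 as
+/// a derived-class member pointer.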
+llvm::Value *
+ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *Src) {
+ assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
+ E->getCastKind() == CK_BaseToDerivedMemberPointer);
+
+ if (isa<llvm::Constant>(Src))
+ return EmitMemberPointerConversion(cast<llvm::Constant>(Src), E);
+
+ CGBuilderTy &Builder = CGF.Builder;
+
+ const MemberPointerType *SrcTy =
+ E->getSubExpr()->getType()->getAs<MemberPointerType>();
+ const MemberPointerType *DestTy = E->getType()->getAs<MemberPointerType>();
+
+ const CXXRecordDecl *SrcDecl = SrcTy->getClass()->getAsCXXRecordDecl();
+ const CXXRecordDecl *DestDecl = DestTy->getClass()->getAsCXXRecordDecl();
+
+ bool DerivedToBase =
+ E->getCastKind() == CK_DerivedToBaseMemberPointer;
+
+ const CXXRecordDecl *BaseDecl, *DerivedDecl;
+ if (DerivedToBase)
+ DerivedDecl = SrcDecl, BaseDecl = DestDecl;
+ else
+ BaseDecl = SrcDecl, DerivedDecl = DestDecl;
+
+ llvm::Constant *Adj =
+ CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl,
+ E->path_begin(),
+ E->path_end());
+ if (!Adj) return Src;
+
+ // For member data pointers, this is just a matter of adding the
+ // offset if the source is non-null.
+ if (SrcTy->isMemberDataPointer()) {
+ llvm::Value *Dst;
+ if (DerivedToBase)
+ Dst = Builder.CreateNSWSub(Src, Adj, "adj");
+ else
+ Dst = Builder.CreateNSWAdd(Src, Adj, "adj");
+
+ // Null check.
+ llvm::Value *Null = llvm::Constant::getAllOnesValue(Src->getType());
+ llvm::Value *IsNull = Builder.CreateICmpEQ(Src, Null, "memptr.isnull");
+ return Builder.CreateSelect(IsNull, Src, Dst);
+ }
+
+ // The this-adjustment is left-shifted by 1 on ARM.
+ if (IsARM) {
+ uint64_t Offset = cast<llvm::ConstantInt>(Adj)->getZExtValue();
+ Offset <<= 1;
+ Adj = llvm::ConstantInt::get(Adj->getType(), Offset);
+ }
+
+ llvm::Value *SrcAdj = Builder.CreateExtractValue(Src, 1, "src.adj");
+ llvm::Value *DstAdj;
+ if (DerivedToBase)
+ DstAdj = Builder.CreateNSWSub(SrcAdj, Adj, "adj");
+ else
+ DstAdj = Builder.CreateNSWAdd(SrcAdj, Adj, "adj");
+
+ return Builder.CreateInsertValue(Src, DstAdj, 1);
+}
+
+llvm::Constant *
+ItaniumCXXABI::EmitMemberPointerConversion(llvm::Constant *C,
+ const CastExpr *E) {
+ const MemberPointerType *SrcTy =
+ E->getSubExpr()->getType()->getAs<MemberPointerType>();
+ const MemberPointerType *DestTy =
+ E->getType()->getAs<MemberPointerType>();
+
+ bool DerivedToBase =
+ E->getCastKind() == CK_DerivedToBaseMemberPointer;
+
+ const CXXRecordDecl *DerivedDecl;
+ if (DerivedToBase)
+ DerivedDecl = SrcTy->getClass()->getAsCXXRecordDecl();
+ else
+ DerivedDecl = DestTy->getClass()->getAsCXXRecordDecl();
+
+ // Calculate the offset to the base class.
+ llvm::Constant *Offset =
+ CGM.GetNonVirtualBaseClassOffset(DerivedDecl,
+ E->path_begin(),
+ E->path_end());
+ // If there's no offset, we're done.
+ if (!Offset) return C;
+
+ // If the source is a member data pointer, we have to do a null
+ // check and then add the offset. In the common case, we can fold
+ // away the offset.
+ if (SrcTy->isMemberDataPointer()) {
+ assert(C->getType() == getPtrDiffTy());
+
+ // If it's a constant int, just create a new constant int.
+ if (llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C)) {
+ int64_t Src = CI->getSExtValue();
+
+ // Null converts to null.
+ if (Src == -1) return CI;
+
+ // Otherwise, just add the offset.
+ int64_t OffsetV = cast<llvm::ConstantInt>(Offset)->getSExtValue();
+ int64_t Dst = (DerivedToBase ? Src - OffsetV : Src + OffsetV);
+ return llvm::ConstantInt::get(CI->getType(), Dst, /*signed*/ true);
+ }
+
+ // Otherwise, we have to form a constant select expression.
+ llvm::Constant *Null = llvm::Constant::getAllOnesValue(C->getType());
+
+ llvm::Constant *IsNull =
+ llvm::ConstantExpr::getICmp(llvm::ICmpInst::ICMP_EQ, C, Null);
+
+ llvm::Constant *Dst;
+ if (DerivedToBase)
+ Dst = llvm::ConstantExpr::getNSWSub(C, Offset);
+ else
+ Dst = llvm::ConstantExpr::getNSWAdd(C, Offset);
+
+ return llvm::ConstantExpr::getSelect(IsNull, Null, Dst);
+ }
+
+ // The this-adjustment is left-shifted by 1 on ARM.
+ if (IsARM) {
+ int64_t OffsetV = cast<llvm::ConstantInt>(Offset)->getSExtValue();
+ OffsetV <<= 1;
+ Offset = llvm::ConstantInt::get(Offset->getType(), OffsetV);
+ }
+
+ llvm::ConstantStruct *CS = cast<llvm::ConstantStruct>(C);
+
+ llvm::Constant *Values[2] = { CS->getOperand(0), 0 };
+ if (DerivedToBase)
+ Values[1] = llvm::ConstantExpr::getSub(CS->getOperand(1), Offset);
+ else
+ Values[1] = llvm::ConstantExpr::getAdd(CS->getOperand(1), Offset);
+
+ return llvm::ConstantStruct::get(CGM.getLLVMContext(), Values, 2,
+ /*Packed=*/false);
+}
+
+
+llvm::Constant *
+ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
+ const llvm::Type *ptrdiff_t = getPtrDiffTy();
+
+ // Itanium C++ ABI 2.3:
+ // A NULL pointer is represented as -1.
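+ // (0 is not usable as the null value because it is a valid offset for the
+ // first data member.)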
+ if (MPT->isMemberDataPointer())
+ return llvm::ConstantInt::get(ptrdiff_t, -1ULL, /*isSigned=*/true);
+
+ llvm::Constant *Zero = llvm::ConstantInt::get(ptrdiff_t, 0);
+ llvm::Constant *Values[2] = { Zero, Zero };
+ return llvm::ConstantStruct::get(CGM.getLLVMContext(), Values, 2,
+ /*Packed=*/false);
+}
+
+llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const FieldDecl *FD) {
+ // Itanium C++ ABI 2.3:
+ // A pointer to data member is an offset from the base address of
+ // the class object containing it, represented as a ptrdiff_t
+
+ QualType ClassType = getContext().getTypeDeclType(FD->getParent());
+ const llvm::StructType *ClassLTy =
+ cast<llvm::StructType>(CGM.getTypes().ConvertType(ClassType));
+
+ const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(FD->getParent());
+ unsigned FieldNo = RL.getLLVMFieldNo(FD);
+ uint64_t Offset =
+ CGM.getTargetData().getStructLayout(ClassLTy)->getElementOffset(FieldNo);
+
+ return llvm::ConstantInt::get(getPtrDiffTy(), Offset);
+}
+
+llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
+ assert(MD->isInstance() && "Member function must not be static!");
+ MD = MD->getCanonicalDecl();
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::Type *ptrdiff_t = getPtrDiffTy();
+
+ // Get the function pointer (or index if this is a virtual function).
+ llvm::Constant *MemPtr[2];
+ if (MD->isVirtual()) {
+ uint64_t Index = CGM.getVTables().getMethodVTableIndex(MD);
+
+ // FIXME: We shouldn't use / 8 here.
+ uint64_t PointerWidthInBytes =
+ getContext().Target.getPointerWidth(0) / 8;
+ uint64_t VTableOffset = (Index * PointerWidthInBytes);
+
+ if (IsARM) {
+ // ARM C++ ABI 3.2.1:
+ // This ABI specifies that adj contains twice the this
+ // adjustment, plus 1 if the member function is virtual. The
+ // least significant bit of adj then makes exactly the same
+ // discrimination as the least significant bit of ptr does for
+ // Itanium.
+ MemPtr[0] = llvm::ConstantInt::get(ptrdiff_t, VTableOffset);
+ MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, 1);
+ } else {
+ // Itanium C++ ABI 2.3:
+ // For a virtual function, [the pointer field] is 1 plus the
+ // virtual table offset (in bytes) of the function,
+ // represented as a ptrdiff_t.
+ MemPtr[0] = llvm::ConstantInt::get(ptrdiff_t, VTableOffset + 1);
+ MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, 0);
+ }
+ } else {
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const llvm::Type *Ty;
+ // Check whether the function has a computable LLVM signature.
+ if (!CodeGenTypes::VerifyFuncTypeComplete(FPT)) {
+ // The function has a computable LLVM signature; use the correct type.
+ Ty = Types.GetFunctionType(Types.getFunctionInfo(MD), FPT->isVariadic());
+ } else {
+ // Use an arbitrary non-function type to tell GetAddrOfFunction that the
+ // function type is incomplete.
+ Ty = ptrdiff_t;
+ }
+
+ llvm::Constant *Addr = CGM.GetAddrOfFunction(MD, Ty);
+ MemPtr[0] = llvm::ConstantExpr::getPtrToInt(Addr, ptrdiff_t);
+ MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, 0);
+ }
+
+ return llvm::ConstantStruct::get(CGM.getLLVMContext(),
+ MemPtr, 2, /*Packed=*/false);
+}
+
+/// The comparison algorithm is pretty easy: the member pointers are
+/// the same if they're either bitwise identical *or* both null.
+///
+/// ARM is different here only because null-ness is more complicated.
+llvm::Value *
+ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::ICmpInst::Predicate Eq;
+ llvm::Instruction::BinaryOps And, Or;
+ if (Inequality) {
+ Eq = llvm::ICmpInst::ICMP_NE;
+ And = llvm::Instruction::Or;
+ Or = llvm::Instruction::And;
+ } else {
+ Eq = llvm::ICmpInst::ICMP_EQ;
+ And = llvm::Instruction::And;
+ Or = llvm::Instruction::Or;
+ }
+
+ // Member data pointers are easy because there's a unique null
+ // value, so it just comes down to bitwise equality.
+ if (MPT->isMemberDataPointer())
+ return Builder.CreateICmp(Eq, L, R);
+
+ // For member function pointers, the tautologies are more complex.
+ // The Itanium tautology is:
+ // (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
+ // The ARM tautology is:
+ // (L == R) <==> (L.ptr == R.ptr &&
+ // (L.adj == R.adj ||
+ // (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
+ // The inequality tautologies have exactly the same structure, except
+ // applying De Morgan's laws.
+
+ llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
+ llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
+
+ // This condition tests whether L.ptr == R.ptr. This must always be
+ // true for equality to hold.
+ llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
+
+ // This condition, together with the assumption that L.ptr == R.ptr,
+ // tests whether the pointers are both null. ARM imposes an extra
+ // condition.
+ llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
+ llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
+
+ // This condition tests whether L.adj == R.adj. If this isn't
+ // true, the pointers are unequal unless they're both null.
+ llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
+ llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
+ llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
+
+ // Null member function pointers on ARM clear the low bit of Adj,
+ // so the zero condition has to check that neither low bit is set.
+ if (IsARM) {
+ llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
+
+ // Compute (l.adj | r.adj) & 1 and test it against zero.
+ llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
+ llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
+ llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
+ "cmp.or.adj");
+ EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
+ }
+
+ // Tie together all our conditions.
+ llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
+ Result = Builder.CreateBinOp(And, PtrEq, Result,
+ Inequality ? "memptr.ne" : "memptr.eq");
+ return Result;
+}
+
+llvm::Value *
+ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ /// For member data pointers, this is just a check against -1.
+ if (MPT->isMemberDataPointer()) {
+ assert(MemPtr->getType() == getPtrDiffTy());
+ llvm::Value *NegativeOne =
+ llvm::Constant::getAllOnesValue(MemPtr->getType());
+ return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
+ }
+
+ // In Itanium, a member function pointer is null if 'ptr' is null.
+ llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
+
+ llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
+ llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
+
+ // In ARM, it's that, plus the low bit of 'adj' must be zero.
+ if (IsARM) {
+ llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
+ llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
+ llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
+ llvm::Value *IsNotVirtual = Builder.CreateICmpEQ(VirtualBit, Zero,
+ "memptr.notvirtual");
+ Result = Builder.CreateAnd(Result, IsNotVirtual);
+ }
+
+ return Result;
+}
+
+/// The Itanium ABI requires non-zero initialization only for data
+/// member pointers, for which '0' is a valid offset.
+bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
+ return MPT->getPointeeType()->isFunctionType();
+}
+
+/// The generic ABI passes 'this', plus a VTT if it's initializing a
+/// base subobject.
+void ItaniumCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType Type,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys) {
+ ASTContext &Context = getContext();
+
+ // 'this' is already there.
+
+ // Check if we need to add a VTT parameter (which has type void **).
+ if (Type == Ctor_Base && Ctor->getParent()->getNumVBases() != 0)
+ ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
+}
+
+/// The ARM ABI does the same as the Itanium ABI, but returns 'this'.
+void ARMCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType Type,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys) {
+ ItaniumCXXABI::BuildConstructorSignature(Ctor, Type, ResTy, ArgTys);
+ ResTy = ArgTys[0];
+}
+
+/// The generic ABI passes 'this', plus a VTT if it's destroying a
+/// base subobject.
+void ItaniumCXXABI::BuildDestructorSignature(const CXXDestructorDecl *Dtor,
+ CXXDtorType Type,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys) {
+ ASTContext &Context = getContext();
+
+ // 'this' is already there.
+
+ // Check if we need to add a VTT parameter (which has type void **).
+ if (Type == Dtor_Base && Dtor->getParent()->getNumVBases() != 0)
+ ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
+}
+
+/// The ARM ABI does the same as the Itanium ABI, but returns 'this'
+/// for non-deleting destructors.
+void ARMCXXABI::BuildDestructorSignature(const CXXDestructorDecl *Dtor,
+ CXXDtorType Type,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys) {
+ ItaniumCXXABI::BuildDestructorSignature(Dtor, Type, ResTy, ArgTys);
+
+ if (Type != Dtor_Deleting)
+ ResTy = ArgTys[0];
+}
+
+void ItaniumCXXABI::BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params) {
+ /// Create the 'this' variable.
+ BuildThisParam(CGF, Params);
+
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
+ assert(MD->isInstance());
+
+ // Check if we need a VTT parameter as well.
+ if (CodeGenVTables::needsVTTParameter(CGF.CurGD)) {
+ ASTContext &Context = getContext();
+
+ // FIXME: avoid the fake decl
+ QualType T = Context.getPointerType(Context.VoidPtrTy);
+ ImplicitParamDecl *VTTDecl
+ = ImplicitParamDecl::Create(Context, 0, MD->getLocation(),
+ &Context.Idents.get("vtt"), T);
+ Params.push_back(std::make_pair(VTTDecl, VTTDecl->getType()));
+ getVTTDecl(CGF) = VTTDecl;
+ }
+}
+
+void ARMCXXABI::BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params) {
+ ItaniumCXXABI::BuildInstanceFunctionParams(CGF, ResTy, Params);
+
+ // Return 'this' from certain constructors and destructors.
+ if (HasThisReturn(CGF.CurGD))
+ ResTy = Params[0].second;
+}
+
+void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
+ /// Initialize the 'this' slot.
+ EmitThisParam(CGF);
+
+ /// Initialize the 'vtt' slot if needed.
+ if (getVTTDecl(CGF)) {
+ getVTTValue(CGF)
+ = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getVTTDecl(CGF)),
+ "vtt");
+ }
+}
+
+void ARMCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
+ ItaniumCXXABI::EmitInstanceFunctionProlog(CGF);
+
+ /// Initialize the return slot to 'this' at the start of the
+ /// function.
+ if (HasThisReturn(CGF.CurGD))
+ CGF.Builder.CreateStore(CGF.LoadCXXThis(), CGF.ReturnValue);
+}
+
+void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
+ RValue RV, QualType ResultType) {
+ if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
+ return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
+
+ // Destructor thunks in the ARM ABI have indeterminate results.
+ const llvm::Type *T =
+ cast<llvm::PointerType>(CGF.ReturnValue->getType())->getElementType();
+ RValue Undef = RValue::get(llvm::UndefValue::get(T));
+ return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
+}
+
+/************************** Array allocation cookies **************************/
+
+bool ItaniumCXXABI::NeedsArrayCookie(QualType ElementType) {
+ ElementType = getContext().getBaseElementType(ElementType);
+ const RecordType *RT = ElementType->getAs<RecordType>();
+ if (!RT) return false;
+
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ // If the class has a non-trivial destructor, it always needs a cookie.
+ if (!RD->hasTrivialDestructor()) return true;
+
+ // If the class's usual deallocation function takes two arguments,
+ // it needs a cookie. Otherwise we don't need a cookie.
+ const CXXMethodDecl *UsualDeallocationFunction = 0;
+
+ // Usual deallocation functions of this form are always found on the
+ // class.
+ //
+ // FIXME: what exactly is this code supposed to do if there's an
+ // ambiguity? That's possible with using declarations.
+ DeclarationName OpName =
+ getContext().DeclarationNames.getCXXOperatorName(OO_Array_Delete);
+ DeclContext::lookup_const_iterator Op, OpEnd;
+ for (llvm::tie(Op, OpEnd) = RD->lookup(OpName); Op != OpEnd; ++Op) {
+ const CXXMethodDecl *Delete =
+ cast<CXXMethodDecl>((*Op)->getUnderlyingDecl());
+
+ if (Delete->isUsualDeallocationFunction()) {
+ UsualDeallocationFunction = Delete;
+ break;
+ }
+ }
+
+ // No usual deallocation function, we don't need a cookie.
+ if (!UsualDeallocationFunction)
+ return false;
+
+ // The usual deallocation function doesn't take a size_t argument,
+ // so we don't need a cookie.
+ if (UsualDeallocationFunction->getNumParams() == 1)
+ return false;
+
+ assert(UsualDeallocationFunction->getNumParams() == 2 &&
+ "Unexpected deallocation function type!");
+ return true;
+}
+
+CharUnits ItaniumCXXABI::GetArrayCookieSize(QualType ElementType) {
+ if (!NeedsArrayCookie(ElementType))
+ return CharUnits::Zero();
+
+ // Padding is the maximum of sizeof(size_t) and alignof(ElementType)
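+ // For example, with an 8-byte size_t and 16-byte element alignment the
+ // cookie occupies 16 bytes, and the element count lives in its last 8 bytes.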
+ ASTContext &Ctx = getContext();
+ return std::max(Ctx.getTypeSizeInChars(Ctx.getSizeType()),
+ Ctx.getTypeAlignInChars(ElementType));
+}
+
+llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ QualType ElementType) {
+ assert(NeedsArrayCookie(ElementType));
+
+ unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
+
+ ASTContext &Ctx = getContext();
+ QualType SizeTy = Ctx.getSizeType();
+ CharUnits SizeSize = Ctx.getTypeSizeInChars(SizeTy);
+
+ // The size of the cookie.
+ CharUnits CookieSize =
+ std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
+
+ // Compute an offset to the cookie.
+ llvm::Value *CookiePtr = NewPtr;
+ CharUnits CookieOffset = CookieSize - SizeSize;
+ if (!CookieOffset.isZero())
+ CookiePtr = CGF.Builder.CreateConstInBoundsGEP1_64(CookiePtr,
+ CookieOffset.getQuantity());
+
+ // Write the number of elements into the appropriate slot.
+ llvm::Value *NumElementsPtr
+ = CGF.Builder.CreateBitCast(CookiePtr,
+ CGF.ConvertType(SizeTy)->getPointerTo(AS));
+ CGF.Builder.CreateStore(NumElements, NumElementsPtr);
+
+ // Finally, compute a pointer to the actual data buffer by skipping
+ // over the cookie completely.
+ return CGF.Builder.CreateConstInBoundsGEP1_64(NewPtr,
+ CookieSize.getQuantity());
+}
+
+void ItaniumCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *Ptr,
+ QualType ElementType,
+ llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr,
+ CharUnits &CookieSize) {
+ // Derive a char* in the same address space as the pointer.
+ unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ const llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
+
+ // If we don't need an array cookie, bail out early.
+ if (!NeedsArrayCookie(ElementType)) {
+ AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
+ NumElements = 0;
+ CookieSize = CharUnits::Zero();
+ return;
+ }
+
+ QualType SizeTy = getContext().getSizeType();
+ CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
+ const llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
+
+ CookieSize
+ = std::max(SizeSize, getContext().getTypeAlignInChars(ElementType));
+
+ CharUnits NumElementsOffset = CookieSize - SizeSize;
+
+ // Compute the allocated pointer.
+ AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
+ AllocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
+ -CookieSize.getQuantity());
+
+ llvm::Value *NumElementsPtr = AllocPtr;
+ if (!NumElementsOffset.isZero())
+ NumElementsPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(NumElementsPtr,
+ NumElementsOffset.getQuantity());
+ NumElementsPtr =
+ CGF.Builder.CreateBitCast(NumElementsPtr, SizeLTy->getPointerTo(AS));
+ NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
+}
+
+CharUnits ARMCXXABI::GetArrayCookieSize(QualType ElementType) {
+ if (!NeedsArrayCookie(ElementType))
+ return CharUnits::Zero();
+
+ // On ARM, the cookie is always:
+ // struct array_cookie {
+ // std::size_t element_size; // element_size != 0
+ // std::size_t element_count;
+ // };
+ // TODO: what should we do if the allocated type actually wants
+ // greater alignment?
+ return getContext().getTypeSizeInChars(getContext().getSizeType()) * 2;
+}
+
+llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ QualType ElementType) {
+ assert(NeedsArrayCookie(ElementType));
+
+ // NewPtr is a char*.
+
+ unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
+
+ ASTContext &Ctx = getContext();
+ CharUnits SizeSize = Ctx.getTypeSizeInChars(Ctx.getSizeType());
+ const llvm::IntegerType *SizeTy =
+ cast<llvm::IntegerType>(CGF.ConvertType(Ctx.getSizeType()));
+
+ // The cookie is always at the start of the buffer.
+ llvm::Value *CookiePtr = NewPtr;
+
+ // The first element is the element size.
+ CookiePtr = CGF.Builder.CreateBitCast(CookiePtr, SizeTy->getPointerTo(AS));
+ llvm::Value *ElementSize = llvm::ConstantInt::get(SizeTy,
+ Ctx.getTypeSizeInChars(ElementType).getQuantity());
+ CGF.Builder.CreateStore(ElementSize, CookiePtr);
+
+ // The second element is the element count.
+ CookiePtr = CGF.Builder.CreateConstInBoundsGEP1_32(CookiePtr, 1);
+ CGF.Builder.CreateStore(NumElements, CookiePtr);
+
+ // Finally, compute a pointer to the actual data buffer by skipping
+ // over the cookie completely.
+ CharUnits CookieSize = 2 * SizeSize;
+ return CGF.Builder.CreateConstInBoundsGEP1_64(NewPtr,
+ CookieSize.getQuantity());
+}
+
+void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *Ptr,
+ QualType ElementType,
+ llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr,
+ CharUnits &CookieSize) {
+ // Derive a char* in the same address space as the pointer.
+ unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ const llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
+
+ // If we don't need an array cookie, bail out early.
+ if (!NeedsArrayCookie(ElementType)) {
+ AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
+ NumElements = 0;
+ CookieSize = CharUnits::Zero();
+ return;
+ }
+
+ QualType SizeTy = getContext().getSizeType();
+ CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
+ const llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
+
+ // The cookie size is always 2 * sizeof(size_t).
+ CookieSize = 2 * SizeSize;
+ CharUnits NumElementsOffset = CookieSize - SizeSize;
+
+ // The allocated pointer is the input ptr, minus that amount.
+ AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
+ AllocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
+ -CookieSize.getQuantity());
+
+ // The number of elements is at offset sizeof(size_t) relative to that.
+ llvm::Value *NumElementsPtr
+ = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
+ SizeSize.getQuantity());
+ NumElementsPtr =
+ CGF.Builder.CreateBitCast(NumElementsPtr, SizeLTy->getPointerTo(AS));
+ NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/Makefile b/contrib/llvm/tools/clang/lib/CodeGen/Makefile
new file mode 100644
index 0000000..6032dff
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/Makefile
@@ -0,0 +1,19 @@
+##===- clang/lib/CodeGen/Makefile --------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements the AST -> LLVM code generation library for the
+# C-Language front-end.
+#
+##===----------------------------------------------------------------------===##
+
+CLANG_LEVEL := ../..
+LIBRARYNAME := clangCodeGen
+
+include $(CLANG_LEVEL)/Makefile
+
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp b/contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp
new file mode 100644
index 0000000..e198874
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp
@@ -0,0 +1,2515 @@
+//===--- Mangle.cpp - Mangle C++ Names --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements C++ name mangling according to the Itanium C++ ABI,
+// which is used in GCC 3.2 and newer (and many compilers that are
+// ABI-compatible with GCC):
+//
+// http://www.codesourcery.com/public/cxx-abi/abi.html
+//
+//===----------------------------------------------------------------------===//
+#include "Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "CGVTables.h"
+
+#define MANGLE_CHECKER 0
+
+#if MANGLE_CHECKER
+#include <cxxabi.h>
+#endif
+
+using namespace clang;
+using namespace CodeGen;
+
+MiscNameMangler::MiscNameMangler(MangleContext &C,
+ llvm::SmallVectorImpl<char> &Res)
+ : Context(C), Out(Res) { }
+
+void MiscNameMangler::mangleBlock(GlobalDecl GD, const BlockDecl *BD) {
+ // Mangle the context of the block.
+ // FIXME: We currently mimic GCC's mangling scheme, which leaves much to be
+ // desired. Come up with a better mangling scheme.
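+ // For illustration, under the scheme below a block literal inside a plain
+ // function 'f' is named "__f_block_invoke_<N>" and a file-scope block is
+ // named "__block_global_<N>", where <N> is the block id assigned by the
+ // MangleContext.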
+ const DeclContext *DC = BD->getDeclContext();
+ while (isa<BlockDecl>(DC) || isa<EnumDecl>(DC))
+ DC = DC->getParent();
+ if (DC->isFunctionOrMethod()) {
+ Out << "__";
+ if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC))
+ mangleObjCMethodName(Method);
+ else {
+ const NamedDecl *ND = cast<NamedDecl>(DC);
+ if (IdentifierInfo *II = ND->getIdentifier())
+ Out << II->getName();
+ else if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND)) {
+ llvm::SmallString<64> Buffer;
+ Context.mangleCXXDtor(D, GD.getDtorType(), Buffer);
+ Out << Buffer;
+ }
+ else if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND)) {
+ llvm::SmallString<64> Buffer;
+ Context.mangleCXXCtor(D, GD.getCtorType(), Buffer);
+ Out << Buffer;
+ }
+ else {
+ // FIXME: We were doing a mangleUnqualifiedName() before, but that's
+ // a private member of a class that will soon itself be private to the
+ // Itanium C++ ABI object. What should we do now? Right now, I'm just
+ // calling the mangleName() method on the MangleContext; is there a
+ // better way?
+ llvm::SmallString<64> Buffer;
+ Context.mangleName(ND, Buffer);
+ Out << Buffer;
+ }
+ }
+ Out << "_block_invoke_" << Context.getBlockId(BD, true);
+ } else {
+ Out << "__block_global_" << Context.getBlockId(BD, false);
+ }
+}
+
+void MiscNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
+ llvm::SmallString<64> Name;
+ llvm::raw_svector_ostream OS(Name);
+
+ const ObjCContainerDecl *CD =
+ dyn_cast<ObjCContainerDecl>(MD->getDeclContext());
+ assert(CD && "Missing container decl in GetNameForMethod");
+ OS << (MD->isInstanceMethod() ? '-' : '+') << '[' << CD->getName();
+ if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(CD))
+ OS << '(' << CID->getName() << ')';
+ OS << ' ' << MD->getSelector().getAsString() << ']';
+
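+ // The emitted form is "<length><name>"; for instance, -[NSString length]
+ // becomes "18-[NSString length]".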
+ Out << OS.str().size() << OS.str();
+}
+
+namespace {
+
+static const DeclContext *GetLocalClassFunctionDeclContext(
+ const DeclContext *DC) {
+ if (isa<CXXRecordDecl>(DC)) {
+ while (!DC->isNamespace() && !DC->isTranslationUnit() &&
+ !isa<FunctionDecl>(DC))
+ DC = DC->getParent();
+ if (isa<FunctionDecl>(DC))
+ return DC;
+ }
+ return 0;
+}
+
+static const CXXMethodDecl *getStructor(const CXXMethodDecl *MD) {
+ assert((isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)) &&
+ "Passed in decl is not a ctor or dtor!");
+
+ if (const TemplateDecl *TD = MD->getPrimaryTemplate()) {
+ MD = cast<CXXMethodDecl>(TD->getTemplatedDecl());
+
+ assert((isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)) &&
+ "Templated decl is not a ctor or dtor!");
+ }
+
+ return MD;
+}
+
+static const unsigned UnknownArity = ~0U;
+
+/// CXXNameMangler - Manage the mangling of a single name.
+class CXXNameMangler {
+ MangleContext &Context;
+ llvm::raw_svector_ostream Out;
+
+ const CXXMethodDecl *Structor;
+ unsigned StructorType;
+
+ /// SeqID - The next substitution sequence number.
+ unsigned SeqID;
+
+ llvm::DenseMap<uintptr_t, unsigned> Substitutions;
+
+ ASTContext &getASTContext() const { return Context.getASTContext(); }
+
+public:
+ CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res)
+ : Context(C), Out(Res), Structor(0), StructorType(0), SeqID(0) { }
+ CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res,
+ const CXXConstructorDecl *D, CXXCtorType Type)
+ : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type),
+ SeqID(0) { }
+ CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res,
+ const CXXDestructorDecl *D, CXXDtorType Type)
+ : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type),
+ SeqID(0) { }
+
+#if MANGLE_CHECKER
+ ~CXXNameMangler() {
+ if (Out.str()[0] == '\01')
+ return;
+
+ int status = 0;
+ char *result = abi::__cxa_demangle(Out.str().str().c_str(), 0, 0, &status);
+ assert(status == 0 && "Could not demangle mangled name!");
+ free(result);
+ }
+#endif
+ llvm::raw_svector_ostream &getStream() { return Out; }
+
+ void mangle(const NamedDecl *D, llvm::StringRef Prefix = "_Z");
+ void mangleCallOffset(int64_t NonVirtual, int64_t Virtual);
+ void mangleNumber(const llvm::APSInt &I);
+ void mangleNumber(int64_t Number);
+ void mangleFloat(const llvm::APFloat &F);
+ void mangleFunctionEncoding(const FunctionDecl *FD);
+ void mangleName(const NamedDecl *ND);
+ void mangleType(QualType T);
+ void mangleNameOrStandardSubstitution(const NamedDecl *ND);
+
+private:
+ bool mangleSubstitution(const NamedDecl *ND);
+ bool mangleSubstitution(QualType T);
+ bool mangleSubstitution(TemplateName Template);
+ bool mangleSubstitution(uintptr_t Ptr);
+
+ bool mangleStandardSubstitution(const NamedDecl *ND);
+
+ void addSubstitution(const NamedDecl *ND) {
+ ND = cast<NamedDecl>(ND->getCanonicalDecl());
+
+ addSubstitution(reinterpret_cast<uintptr_t>(ND));
+ }
+ void addSubstitution(QualType T);
+ void addSubstitution(TemplateName Template);
+ void addSubstitution(uintptr_t Ptr);
+
+ void mangleUnresolvedScope(NestedNameSpecifier *Qualifier);
+ void mangleUnresolvedName(NestedNameSpecifier *Qualifier,
+ DeclarationName Name,
+ unsigned KnownArity = UnknownArity);
+
+ void mangleName(const TemplateDecl *TD,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs);
+ void mangleUnqualifiedName(const NamedDecl *ND) {
+ mangleUnqualifiedName(ND, ND->getDeclName(), UnknownArity);
+ }
+ void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name,
+ unsigned KnownArity);
+ void mangleUnscopedName(const NamedDecl *ND);
+ void mangleUnscopedTemplateName(const TemplateDecl *ND);
+ void mangleUnscopedTemplateName(TemplateName);
+ void mangleSourceName(const IdentifierInfo *II);
+ void mangleLocalName(const NamedDecl *ND);
+ void mangleNestedName(const NamedDecl *ND, const DeclContext *DC,
+ bool NoFunction=false);
+ void mangleNestedName(const TemplateDecl *TD,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs);
+ void manglePrefix(const DeclContext *DC, bool NoFunction=false);
+ void mangleTemplatePrefix(const TemplateDecl *ND);
+ void mangleTemplatePrefix(TemplateName Template);
+ void mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity);
+ void mangleQualifiers(Qualifiers Quals);
+
+ void mangleObjCMethodName(const ObjCMethodDecl *MD);
+
+ // Declare manglers for every type class.
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T);
+#include "clang/AST/TypeNodes.def"
+
+ void mangleType(const TagType*);
+ void mangleType(TemplateName);
+ void mangleBareFunctionType(const FunctionType *T,
+ bool MangleReturnType);
+
+ void mangleIntegerLiteral(QualType T, const llvm::APSInt &Value);
+ void mangleMemberExpr(const Expr *Base, bool IsArrow,
+ NestedNameSpecifier *Qualifier,
+ DeclarationName Name,
+ unsigned KnownArity);
+ void mangleExpression(const Expr *E, unsigned Arity = UnknownArity);
+ void mangleCXXCtorType(CXXCtorType T);
+ void mangleCXXDtorType(CXXDtorType T);
+
+ void mangleTemplateArgs(const ExplicitTemplateArgumentList &TemplateArgs);
+ void mangleTemplateArgs(TemplateName Template,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs);
+ void mangleTemplateArgs(const TemplateParameterList &PL,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs);
+ void mangleTemplateArgs(const TemplateParameterList &PL,
+ const TemplateArgumentList &AL);
+ void mangleTemplateArg(const NamedDecl *P, const TemplateArgument &A);
+
+ void mangleTemplateParameter(unsigned Index);
+};
+}
+
+static bool isInCLinkageSpecification(const Decl *D) {
+ D = D->getCanonicalDecl();
+ for (const DeclContext *DC = D->getDeclContext();
+ !DC->isTranslationUnit(); DC = DC->getParent()) {
+ if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC))
+ return Linkage->getLanguage() == LinkageSpecDecl::lang_c;
+ }
+
+ return false;
+}
+
+bool MangleContext::shouldMangleDeclName(const NamedDecl *D) {
+ // In C, functions with no attributes never need to be mangled. Fastpath them.
+ if (!getASTContext().getLangOptions().CPlusPlus && !D->hasAttrs())
+ return false;
+
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (D->hasAttr<AsmLabelAttr>())
+ return true;
+
+ // Clang's "overloadable" attribute extension to C/C++ implies name mangling
+ // (always), as does being a C++ member function or a function
+ // whose name is not a simple identifier.
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (FD && (FD->hasAttr<OverloadableAttr>() || isa<CXXMethodDecl>(FD) ||
+ !FD->getDeclName().isIdentifier()))
+ return true;
+
+ // Otherwise, no mangling is done outside C++ mode.
+ if (!getASTContext().getLangOptions().CPlusPlus)
+ return false;
+
+ // Variables at global scope with non-internal linkage are not mangled
+ if (!FD) {
+ const DeclContext *DC = D->getDeclContext();
+ // Check for extern variable declared locally.
+ if (DC->isFunctionOrMethod() && D->hasLinkage())
+ while (!DC->isNamespace() && !DC->isTranslationUnit())
+ DC = DC->getParent();
+ if (DC->isTranslationUnit() && D->getLinkage() != InternalLinkage)
+ return false;
+ }
+
+ // Class members are always mangled.
+ if (D->getDeclContext()->isRecord())
+ return true;
+
+ // C functions and "main" are not mangled.
+ if ((FD && FD->isMain()) || isInCLinkageSpecification(D))
+ return false;
+
+ return true;
+}
+
+void CXXNameMangler::mangle(const NamedDecl *D, llvm::StringRef Prefix) {
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
+ // If we have an asm name, then we use it as the mangling.
+ Out << '\01'; // LLVM IR Marker for __asm("foo")
+ Out << ALA->getLabel();
+ return;
+ }
+
+ // <mangled-name> ::= _Z <encoding>
+ // ::= <data name>
+ // ::= <special-name>
+ Out << Prefix;
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ mangleFunctionEncoding(FD);
+ else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ mangleName(VD);
+ else
+ mangleName(cast<FieldDecl>(D));
+}
+
+void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
+ // <encoding> ::= <function name> <bare-function-type>
+ mangleName(FD);
+
+ // Don't mangle in the type if this isn't a decl we should typically mangle.
+ if (!Context.shouldMangleDeclName(FD))
+ return;
+
+ // Whether the mangling of a function type includes the return type depends on
+ // the context and the nature of the function. The rules for deciding whether
+ // the return type is included are:
+ //
+ // 1. Template functions (names or types) have return types encoded, with
+ // the exceptions listed below.
+ // 2. Function types not appearing as part of a function name mangling,
+ // e.g. parameters, pointer types, etc., have return type encoded, with the
+ // exceptions listed below.
+ // 3. Non-template function names do not have return types encoded.
+ //
+ // The exceptions mentioned in (1) and (2) above, for which the return type is
+ // never included, are
+ // 1. Constructors.
+ // 2. Destructors.
+ // 3. Conversion operator functions, e.g. operator int.
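+ //
+ // For example, plain 'void foo(int)' mangles as _Z3fooi (no return type),
+ // while the instantiation foo<int> of 'template<typename T> void foo(T)'
+ // mangles as _Z3fooIiEvT_, with the 'v' return type encoded.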
+ bool MangleReturnType = false;
+ if (FunctionTemplateDecl *PrimaryTemplate = FD->getPrimaryTemplate()) {
+ if (!(isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD) ||
+ isa<CXXConversionDecl>(FD)))
+ MangleReturnType = true;
+
+ // Mangle the type of the primary template.
+ FD = PrimaryTemplate->getTemplatedDecl();
+ }
+
+ // Do the canonicalization out here because parameter types can
+ // undergo additional canonicalization (e.g. array decay).
+ FunctionType *FT = cast<FunctionType>(Context.getASTContext()
+ .getCanonicalType(FD->getType()));
+
+ mangleBareFunctionType(FT, MangleReturnType);
+}
+
+static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) {
+ while (isa<LinkageSpecDecl>(DC)) {
+ DC = DC->getParent();
+ }
+
+ return DC;
+}
+
+/// isStd - Return whether a given namespace is the 'std' namespace.
+static bool isStd(const NamespaceDecl *NS) {
+ if (!IgnoreLinkageSpecDecls(NS->getParent())->isTranslationUnit())
+ return false;
+
+ const IdentifierInfo *II = NS->getOriginalNamespace()->getIdentifier();
+ return II && II->isStr("std");
+}
+
+// isStdNamespace - Return whether a given decl context is a toplevel 'std'
+// namespace.
+static bool isStdNamespace(const DeclContext *DC) {
+ if (!DC->isNamespace())
+ return false;
+
+ return isStd(cast<NamespaceDecl>(DC));
+}
+
+static const TemplateDecl *
+isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) {
+ // Check if we have a function template.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)){
+ if (const TemplateDecl *TD = FD->getPrimaryTemplate()) {
+ TemplateArgs = FD->getTemplateSpecializationArgs();
+ return TD;
+ }
+ }
+
+ // Check if we have a class template.
+ if (const ClassTemplateSpecializationDecl *Spec =
+ dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
+ TemplateArgs = &Spec->getTemplateArgs();
+ return Spec->getSpecializedTemplate();
+ }
+
+ return 0;
+}
+
+void CXXNameMangler::mangleName(const NamedDecl *ND) {
+ // <name> ::= <nested-name>
+ // ::= <unscoped-name>
+ // ::= <unscoped-template-name> <template-args>
+ // ::= <local-name>
+ //
+ const DeclContext *DC = ND->getDeclContext();
+
+ if (GetLocalClassFunctionDeclContext(DC)) {
+ mangleLocalName(ND);
+ return;
+ }
+
+ // If this is an extern variable declared locally, the relevant DeclContext
+ // is that of the containing namespace, or the translation unit.
+ if (isa<FunctionDecl>(DC) && ND->hasLinkage())
+ while (!DC->isNamespace() && !DC->isTranslationUnit())
+ DC = DC->getParent();
+
+ while (isa<LinkageSpecDecl>(DC))
+ DC = DC->getParent();
+
+ if (DC->isTranslationUnit() || isStdNamespace(DC)) {
+ // Check if we have a template.
+ const TemplateArgumentList *TemplateArgs = 0;
+ if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ mangleUnscopedTemplateName(TD);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
+ return;
+ }
+
+ mangleUnscopedName(ND);
+ return;
+ }
+
+ if (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)) {
+ mangleLocalName(ND);
+ return;
+ }
+
+ mangleNestedName(ND, DC);
+}
+void CXXNameMangler::mangleName(const TemplateDecl *TD,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ const DeclContext *DC = IgnoreLinkageSpecDecls(TD->getDeclContext());
+
+ if (DC->isTranslationUnit() || isStdNamespace(DC)) {
+ mangleUnscopedTemplateName(TD);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, TemplateArgs, NumTemplateArgs);
+ } else {
+ mangleNestedName(TD, TemplateArgs, NumTemplateArgs);
+ }
+}
+
+void CXXNameMangler::mangleUnscopedName(const NamedDecl *ND) {
+ // <unscoped-name> ::= <unqualified-name>
+ // ::= St <unqualified-name> # ::std::
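+ // For example, 'std::terminate' is mangled "St9terminate", so the function
+ // symbol comes out as _ZSt9terminatev.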
+ if (isStdNamespace(ND->getDeclContext()))
+ Out << "St";
+
+ mangleUnqualifiedName(ND);
+}
+
+void CXXNameMangler::mangleUnscopedTemplateName(const TemplateDecl *ND) {
+ // <unscoped-template-name> ::= <unscoped-name>
+ // ::= <substitution>
+ if (mangleSubstitution(ND))
+ return;
+
+ // <template-template-param> ::= <template-param>
+ if (const TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(ND)) {
+ mangleTemplateParameter(TTP->getIndex());
+ return;
+ }
+
+ mangleUnscopedName(ND->getTemplatedDecl());
+ addSubstitution(ND);
+}
+
+void CXXNameMangler::mangleUnscopedTemplateName(TemplateName Template) {
+ // <unscoped-template-name> ::= <unscoped-name>
+ // ::= <substitution>
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleUnscopedTemplateName(TD);
+
+ if (mangleSubstitution(Template))
+ return;
+
+ // FIXME: How to cope with operators here?
+ DependentTemplateName *Dependent = Template.getAsDependentTemplateName();
+ assert(Dependent && "Not a dependent template name?");
+ if (!Dependent->isIdentifier()) {
+ // FIXME: We can't possibly know the arity of the operator here!
+ Diagnostic &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
+ "cannot mangle dependent operator name");
+ Diags.Report(FullSourceLoc(), DiagID);
+ return;
+ }
+
+ mangleSourceName(Dependent->getIdentifier());
+ addSubstitution(Template);
+}
+
+void CXXNameMangler::mangleFloat(const llvm::APFloat &F) {
+ // TODO: avoid this copy with careful stream management.
+ llvm::SmallString<20> Buffer;
+ F.bitcastToAPInt().toString(Buffer, 16, false);
+ Out.write(Buffer.data(), Buffer.size());
+}
+
+void CXXNameMangler::mangleNumber(const llvm::APSInt &Value) {
+ if (Value.isSigned() && Value.isNegative()) {
+ Out << 'n';
+ Value.abs().print(Out, true);
+ } else
+ Value.print(Out, Value.isSigned());
+}
+
+void CXXNameMangler::mangleNumber(int64_t Number) {
+ // <number> ::= [n] <non-negative decimal integer>
+ if (Number < 0) {
+ Out << 'n';
+ Number = -Number;
+ }
+
+ Out << Number;
+}
+
+void CXXNameMangler::mangleCallOffset(int64_t NonVirtual, int64_t Virtual) {
+ // <call-offset> ::= h <nv-offset> _
+ // ::= v <v-offset> _
+ // <nv-offset> ::= <offset number> # non-virtual base override
+ // <v-offset> ::= <offset number> _ <virtual offset number>
+ // # virtual base override, with vcall offset
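+ //
+ // For example, a thunk whose 'this' adjustment is a fixed -16 bytes encodes
+ // its call offset as "hn16_", as seen in thunk names like _ZThn16_....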
+ if (!Virtual) {
+ Out << 'h';
+ mangleNumber(NonVirtual);
+ Out << '_';
+ return;
+ }
+
+ Out << 'v';
+ mangleNumber(NonVirtual);
+ Out << '_';
+ mangleNumber(Virtual);
+ Out << '_';
+}
+
+void CXXNameMangler::mangleUnresolvedScope(NestedNameSpecifier *Qualifier) {
+ Qualifier = getASTContext().getCanonicalNestedNameSpecifier(Qualifier);
+ switch (Qualifier->getKind()) {
+ case NestedNameSpecifier::Global:
+ // nothing
+ break;
+ case NestedNameSpecifier::Namespace:
+ mangleName(Qualifier->getAsNamespace());
+ break;
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ const Type *QTy = Qualifier->getAsType();
+
+ if (const TemplateSpecializationType *TST =
+ dyn_cast<TemplateSpecializationType>(QTy)) {
+ if (!mangleSubstitution(QualType(TST, 0))) {
+ mangleTemplatePrefix(TST->getTemplateName());
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(TST->getTemplateName(), TST->getArgs(),
+ TST->getNumArgs());
+ addSubstitution(QualType(TST, 0));
+ }
+ } else {
+ // We use the QualType mangle type variant here because it handles
+ // substitutions.
+ mangleType(QualType(QTy, 0));
+ }
+ }
+ break;
+ case NestedNameSpecifier::Identifier:
+ // Member expressions can have these without prefixes.
+ if (Qualifier->getPrefix())
+ mangleUnresolvedScope(Qualifier->getPrefix());
+ mangleSourceName(Qualifier->getAsIdentifier());
+ break;
+ }
+}
+
+/// Mangles a name which was not resolved to a specific entity.
+void CXXNameMangler::mangleUnresolvedName(NestedNameSpecifier *Qualifier,
+ DeclarationName Name,
+ unsigned KnownArity) {
+ if (Qualifier)
+ mangleUnresolvedScope(Qualifier);
+ // FIXME: ambiguity of unqualified lookup with ::
+
+ mangleUnqualifiedName(0, Name, KnownArity);
+}
+
+static const FieldDecl *FindFirstNamedDataMember(const RecordDecl *RD) {
+ assert(RD->isAnonymousStructOrUnion() &&
+ "Expected anonymous struct or union!");
+
+ for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I) {
+ const FieldDecl *FD = *I;
+
+ if (FD->getIdentifier())
+ return FD;
+
+ if (const RecordType *RT = FD->getType()->getAs<RecordType>()) {
+ if (const FieldDecl *NamedDataMember =
+ FindFirstNamedDataMember(RT->getDecl()))
+ return NamedDataMember;
+ }
+ }
+
+ // We didn't find a named data member.
+ return 0;
+}
+
+void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
+ DeclarationName Name,
+ unsigned KnownArity) {
+ // <unqualified-name> ::= <operator-name>
+ // ::= <ctor-dtor-name>
+ // ::= <source-name>
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier: {
+ if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
+ // We must avoid conflicts between internally- and externally-
+ // linked variable declaration names in the same TU.
+ // This naming convention is the same as that followed by GCC, though it
+ // shouldn't actually matter.
+ if (ND && isa<VarDecl>(ND) && ND->getLinkage() == InternalLinkage &&
+ ND->getDeclContext()->isFileContext())
+ Out << 'L';
+
+ mangleSourceName(II);
+ break;
+ }
+
+ // Otherwise, an anonymous entity. We must have a declaration.
+ assert(ND && "mangling empty name without declaration");
+
+ if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
+ if (NS->isAnonymousNamespace()) {
+ // This is how gcc mangles these names.
+ Out << "12_GLOBAL__N_1";
+ break;
+ }
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
+ // We must have an anonymous union or struct declaration.
+ const RecordDecl *RD =
+ cast<RecordDecl>(VD->getType()->getAs<RecordType>()->getDecl());
+
+ // Itanium C++ ABI 5.1.2:
+ //
+ // For the purposes of mangling, the name of an anonymous union is
+ // considered to be the name of the first named data member found by a
+ // pre-order, depth-first, declaration-order walk of the data members of
+ // the anonymous union. If there is no such data member (i.e., if all of
+ // the data members in the union are unnamed), then there is no way for
+ // a program to refer to the anonymous union, and there is therefore no
+ // need to mangle its name.
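+ //
+ // For example, 'static union { int a; float b; };' at namespace scope is
+ // mangled using the name of its first named field, 'a'.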
+ const FieldDecl *FD = FindFirstNamedDataMember(RD);
+
+ // It's actually possible for various reasons for us to get here
+ // with an empty anonymous struct / union. Fortunately, it
+ // doesn't really matter what name we generate.
+ if (!FD) break;
+ assert(FD->getIdentifier() && "Data member name isn't an identifier!");
+
+ mangleSourceName(FD->getIdentifier());
+ break;
+ }
+
+ // We must have an anonymous struct.
+ const TagDecl *TD = cast<TagDecl>(ND);
+ if (const TypedefDecl *D = TD->getTypedefForAnonDecl()) {
+ assert(TD->getDeclContext() == D->getDeclContext() &&
+ "Typedef should not be in another decl context!");
+ assert(D->getDeclName().getAsIdentifierInfo() &&
+ "Typedef was not named!");
+ mangleSourceName(D->getDeclName().getAsIdentifierInfo());
+ break;
+ }
+
+ // Get a unique id for the anonymous struct.
+ uint64_t AnonStructId = Context.getAnonymousStructId(TD);
+
+ // Mangle it as a source name in the form
+ // [n] $_<id>
+ // where n is the length of the string.
+ llvm::SmallString<8> Str;
+ Str += "$_";
+ Str += llvm::utostr(AnonStructId);
+
+ Out << Str.size();
+ Out << Str.str();
+ break;
+ }
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ assert(false && "Can't mangle Objective-C selector names here!");
+ break;
+
+ case DeclarationName::CXXConstructorName:
+ if (ND == Structor)
+ // If the named decl is the C++ constructor we're mangling, use the type
+ // we were given.
+ mangleCXXCtorType(static_cast<CXXCtorType>(StructorType));
+ else
+ // Otherwise, use the complete constructor name. This is relevant if a
+ // class with a constructor is declared within a constructor.
+ mangleCXXCtorType(Ctor_Complete);
+ break;
+
+ case DeclarationName::CXXDestructorName:
+ if (ND == Structor)
+ // If the named decl is the C++ destructor we're mangling, use the type we
+ // were given.
+ mangleCXXDtorType(static_cast<CXXDtorType>(StructorType));
+ else
+ // Otherwise, use the complete destructor name. This is relevant if a
+ // class with a destructor is declared within a destructor.
+ mangleCXXDtorType(Dtor_Complete);
+ break;
+
+ case DeclarationName::CXXConversionFunctionName:
+ // <operator-name> ::= cv <type> # (cast)
+ Out << "cv";
+ mangleType(Context.getASTContext().getCanonicalType(Name.getCXXNameType()));
+ break;
+
+ case DeclarationName::CXXOperatorName: {
+ unsigned Arity;
+ if (ND) {
+ Arity = cast<FunctionDecl>(ND)->getNumParams();
+
+ // If we have a C++ member function, we need to include the 'this' pointer.
+ // FIXME: This does not make sense for operators that are static, but their
+ // names stay the same regardless of the arity (operator new for instance).
+ if (isa<CXXMethodDecl>(ND))
+ Arity++;
+ } else
+ Arity = KnownArity;
+
+ mangleOperatorName(Name.getCXXOverloadedOperator(), Arity);
+ break;
+ }
+
+ case DeclarationName::CXXLiteralOperatorName:
+ // FIXME: This mangling is not yet official.
+ Out << "li";
+ mangleSourceName(Name.getCXXLiteralIdentifier());
+ break;
+
+ case DeclarationName::CXXUsingDirective:
+ assert(false && "Can't mangle a using directive name!");
+ break;
+ }
+}
+
+void CXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
+ // <source-name> ::= <positive length number> <identifier>
+ // <number> ::= [n] <non-negative decimal integer>
+ // <identifier> ::= <unqualified source code identifier>
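+ // For example, the identifier "foobar" is emitted as "6foobar".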
+ Out << II->getLength() << II->getName();
+}
+
+void CXXNameMangler::mangleNestedName(const NamedDecl *ND,
+ const DeclContext *DC,
+ bool NoFunction) {
+ // <nested-name> ::= N [<CV-qualifiers>] <prefix> <unqualified-name> E
+ // ::= N [<CV-qualifiers>] <template-prefix> <template-args> E
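+ //
+ // For example, 'void N::C::f() const' has the nested name NK1N1C1fE, giving
+ // the symbol _ZNK1N1C1fEv.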
+
+ Out << 'N';
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(ND))
+ mangleQualifiers(Qualifiers::fromCVRMask(Method->getTypeQualifiers()));
+
+ // Check if we have a template.
+ const TemplateArgumentList *TemplateArgs = 0;
+ if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ mangleTemplatePrefix(TD);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
+ }
+ else {
+ manglePrefix(DC, NoFunction);
+ mangleUnqualifiedName(ND);
+ }
+
+ Out << 'E';
+}
+void CXXNameMangler::mangleNestedName(const TemplateDecl *TD,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ // <nested-name> ::= N [<CV-qualifiers>] <template-prefix> <template-args> E
+
+ Out << 'N';
+
+ mangleTemplatePrefix(TD);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, TemplateArgs, NumTemplateArgs);
+
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleLocalName(const NamedDecl *ND) {
+ // <local-name> := Z <function encoding> E <entity name> [<discriminator>]
+ // := Z <function encoding> E s [<discriminator>]
+ // <discriminator> := _ <non-negative number>
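+ //
+ // For example, a function-local 'static int x;' inside 'void f()' is
+ // mangled _ZZ1fvE1x.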
+ const DeclContext *DC = ND->getDeclContext();
+ Out << 'Z';
+
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(DC)) {
+ mangleObjCMethodName(MD);
+ }
+ else if (const DeclContext *CDC = GetLocalClassFunctionDeclContext(DC)) {
+ mangleFunctionEncoding(cast<FunctionDecl>(CDC));
+ Out << 'E';
+ mangleNestedName(ND, DC, true /*NoFunction*/);
+
+ // FIXME. This still does not cover all cases.
+ unsigned disc;
+ if (Context.getNextDiscriminator(ND, disc)) {
+ if (disc < 10)
+ Out << '_' << disc;
+ else
+ Out << "__" << disc << '_';
+ }
+
+ return;
+ }
+ else
+ mangleFunctionEncoding(cast<FunctionDecl>(DC));
+
+ Out << 'E';
+ mangleUnqualifiedName(ND);
+}
+
+void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
+ // <prefix> ::= <prefix> <unqualified-name>
+ // ::= <template-prefix> <template-args>
+ // ::= <template-param>
+ // ::= # empty
+ // ::= <substitution>
+
+ while (isa<LinkageSpecDecl>(DC))
+ DC = DC->getParent();
+
+ if (DC->isTranslationUnit())
+ return;
+
+ if (const BlockDecl *Block = dyn_cast<BlockDecl>(DC)) {
+ manglePrefix(DC->getParent(), NoFunction);
+ llvm::SmallString<64> Name;
+ Context.mangleBlock(GlobalDecl(), Block, Name);
+ Out << Name.size() << Name;
+ return;
+ }
+
+ if (mangleSubstitution(cast<NamedDecl>(DC)))
+ return;
+
+ // Check if we have a template.
+ const TemplateArgumentList *TemplateArgs = 0;
+ if (const TemplateDecl *TD = isTemplate(cast<NamedDecl>(DC), TemplateArgs)) {
+ mangleTemplatePrefix(TD);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
+ }
+ else if (NoFunction && (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)))
+ return;
+ else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC))
+ mangleObjCMethodName(Method);
+ else {
+ manglePrefix(DC->getParent(), NoFunction);
+ mangleUnqualifiedName(cast<NamedDecl>(DC));
+ }
+
+ addSubstitution(cast<NamedDecl>(DC));
+}
+
+void CXXNameMangler::mangleTemplatePrefix(TemplateName Template) {
+ // <template-prefix> ::= <prefix> <template unqualified-name>
+ // ::= <template-param>
+ // ::= <substitution>
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleTemplatePrefix(TD);
+
+ if (QualifiedTemplateName *Qualified = Template.getAsQualifiedTemplateName())
+ mangleUnresolvedScope(Qualified->getQualifier());
+
+ if (OverloadedTemplateStorage *Overloaded
+ = Template.getAsOverloadedTemplate()) {
+ mangleUnqualifiedName(0, (*Overloaded->begin())->getDeclName(),
+ UnknownArity);
+ return;
+ }
+
+ DependentTemplateName *Dependent = Template.getAsDependentTemplateName();
+ assert(Dependent && "Unknown template name kind?");
+ mangleUnresolvedScope(Dependent->getQualifier());
+ mangleUnscopedTemplateName(Template);
+}
+
+void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND) {
+ // <template-prefix> ::= <prefix> <template unqualified-name>
+ // ::= <template-param>
+ // ::= <substitution>
+ // <template-template-param> ::= <template-param>
+ // <substitution>
+
+ if (mangleSubstitution(ND))
+ return;
+
+ // <template-template-param> ::= <template-param>
+ if (const TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(ND)) {
+ mangleTemplateParameter(TTP->getIndex());
+ return;
+ }
+
+ manglePrefix(ND->getDeclContext());
+ mangleUnqualifiedName(ND->getTemplatedDecl());
+ addSubstitution(ND);
+}
+
+/// Mangles a template name under the production <type>. Required for
+/// template template arguments.
+/// <type> ::= <class-enum-type>
+/// ::= <template-param>
+/// ::= <substitution>
+void CXXNameMangler::mangleType(TemplateName TN) {
+ if (mangleSubstitution(TN))
+ return;
+
+ TemplateDecl *TD = 0;
+
+ switch (TN.getKind()) {
+ case TemplateName::QualifiedTemplate:
+ TD = TN.getAsQualifiedTemplateName()->getTemplateDecl();
+ goto HaveDecl;
+
+ case TemplateName::Template:
+ TD = TN.getAsTemplateDecl();
+ goto HaveDecl;
+
+ HaveDecl:
+ if (isa<TemplateTemplateParmDecl>(TD))
+ mangleTemplateParameter(cast<TemplateTemplateParmDecl>(TD)->getIndex());
+ else
+ mangleName(TD);
+ break;
+
+ case TemplateName::OverloadedTemplate:
+ llvm_unreachable("can't mangle an overloaded template name as a <type>");
+ break;
+
+ case TemplateName::DependentTemplate: {
+ const DependentTemplateName *Dependent = TN.getAsDependentTemplateName();
+ assert(Dependent->isIdentifier());
+
+ // <class-enum-type> ::= <name>
+ // <name> ::= <nested-name>
+ mangleUnresolvedScope(Dependent->getQualifier());
+ mangleSourceName(Dependent->getIdentifier());
+ break;
+ }
+
+ }
+
+ addSubstitution(TN);
+}
+
+void
+CXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity) {
+ switch (OO) {
+ // <operator-name> ::= nw # new
+ case OO_New: Out << "nw"; break;
+ // ::= na # new[]
+ case OO_Array_New: Out << "na"; break;
+ // ::= dl # delete
+ case OO_Delete: Out << "dl"; break;
+ // ::= da # delete[]
+ case OO_Array_Delete: Out << "da"; break;
+ // ::= ps # + (unary)
+ // ::= pl # + (binary or unknown)
+ case OO_Plus:
+ Out << (Arity == 1? "ps" : "pl"); break;
+ // ::= ng # - (unary)
+ // ::= mi # - (binary or unknown)
+ case OO_Minus:
+ Out << (Arity == 1? "ng" : "mi"); break;
+ // ::= ad # & (unary)
+ // ::= an # & (binary or unknown)
+ case OO_Amp:
+ Out << (Arity == 1? "ad" : "an"); break;
+ // ::= de # * (unary)
+ // ::= ml # * (binary or unknown)
+ case OO_Star:
+ // Use binary when unknown.
+ Out << (Arity == 1? "de" : "ml"); break;
+ // ::= co # ~
+ case OO_Tilde: Out << "co"; break;
+ // ::= dv # /
+ case OO_Slash: Out << "dv"; break;
+ // ::= rm # %
+ case OO_Percent: Out << "rm"; break;
+ // ::= or # |
+ case OO_Pipe: Out << "or"; break;
+ // ::= eo # ^
+ case OO_Caret: Out << "eo"; break;
+ // ::= aS # =
+ case OO_Equal: Out << "aS"; break;
+ // ::= pL # +=
+ case OO_PlusEqual: Out << "pL"; break;
+ // ::= mI # -=
+ case OO_MinusEqual: Out << "mI"; break;
+ // ::= mL # *=
+ case OO_StarEqual: Out << "mL"; break;
+ // ::= dV # /=
+ case OO_SlashEqual: Out << "dV"; break;
+ // ::= rM # %=
+ case OO_PercentEqual: Out << "rM"; break;
+ // ::= aN # &=
+ case OO_AmpEqual: Out << "aN"; break;
+ // ::= oR # |=
+ case OO_PipeEqual: Out << "oR"; break;
+ // ::= eO # ^=
+ case OO_CaretEqual: Out << "eO"; break;
+ // ::= ls # <<
+ case OO_LessLess: Out << "ls"; break;
+ // ::= rs # >>
+ case OO_GreaterGreater: Out << "rs"; break;
+ // ::= lS # <<=
+ case OO_LessLessEqual: Out << "lS"; break;
+ // ::= rS # >>=
+ case OO_GreaterGreaterEqual: Out << "rS"; break;
+ // ::= eq # ==
+ case OO_EqualEqual: Out << "eq"; break;
+ // ::= ne # !=
+ case OO_ExclaimEqual: Out << "ne"; break;
+ // ::= lt # <
+ case OO_Less: Out << "lt"; break;
+ // ::= gt # >
+ case OO_Greater: Out << "gt"; break;
+ // ::= le # <=
+ case OO_LessEqual: Out << "le"; break;
+ // ::= ge # >=
+ case OO_GreaterEqual: Out << "ge"; break;
+ // ::= nt # !
+ case OO_Exclaim: Out << "nt"; break;
+ // ::= aa # &&
+ case OO_AmpAmp: Out << "aa"; break;
+ // ::= oo # ||
+ case OO_PipePipe: Out << "oo"; break;
+ // ::= pp # ++
+ case OO_PlusPlus: Out << "pp"; break;
+ // ::= mm # --
+ case OO_MinusMinus: Out << "mm"; break;
+ // ::= cm # ,
+ case OO_Comma: Out << "cm"; break;
+ // ::= pm # ->*
+ case OO_ArrowStar: Out << "pm"; break;
+ // ::= pt # ->
+ case OO_Arrow: Out << "pt"; break;
+ // ::= cl # ()
+ case OO_Call: Out << "cl"; break;
+ // ::= ix # []
+ case OO_Subscript: Out << "ix"; break;
+
+ // ::= qu # ?
+ // The conditional operator can't be overloaded, but we still handle it when
+ // mangling expressions.
+ case OO_Conditional: Out << "qu"; break;
+
+ case OO_None:
+ case NUM_OVERLOADED_OPERATORS:
+ assert(false && "Not an overloaded operator");
+ break;
+ }
+}
+
+void CXXNameMangler::mangleQualifiers(Qualifiers Quals) {
+ // <CV-qualifiers> ::= [r] [V] [K] # restrict (C99), volatile, const
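+ // For example, the pointee qualifiers of a 'const int *' parameter mangle
+ // as 'K', so 'void f(const int *)' becomes _Z1fPKi.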
+ if (Quals.hasRestrict())
+ Out << 'r';
+ if (Quals.hasVolatile())
+ Out << 'V';
+ if (Quals.hasConst())
+ Out << 'K';
+
+ if (Quals.hasAddressSpace()) {
+ // Extension:
+ //
+ // <type> ::= U <address-space-number>
+ //
+ // where <address-space-number> is a source name consisting of 'AS'
+ // followed by the address space <number>.
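+ //
+ // For example, address space 1 is emitted as the vendor qualifier "U3AS1".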
+ llvm::SmallString<64> ASString;
+ ASString = "AS" + llvm::utostr_32(Quals.getAddressSpace());
+ Out << 'U' << ASString.size() << ASString;
+ }
+
+ // FIXME: For now, just drop all extension qualifiers on the floor.
+}
+
+void CXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
+ llvm::SmallString<64> Buffer;
+ MiscNameMangler(Context, Buffer).mangleObjCMethodName(MD);
+ Out << Buffer;
+}
+
+void CXXNameMangler::mangleType(QualType T) {
+ // Only operate on the canonical type!
+ T = Context.getASTContext().getCanonicalType(T);
+
+ bool IsSubstitutable = T.hasLocalQualifiers() || !isa<BuiltinType>(T);
+ if (IsSubstitutable && mangleSubstitution(T))
+ return;
+
+ if (Qualifiers Quals = T.getLocalQualifiers()) {
+ mangleQualifiers(Quals);
+ // Recurse: even if the qualified type isn't yet substitutable,
+ // the unqualified type might be.
+ mangleType(T.getLocalUnqualifiedType());
+ } else {
+ switch (T->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT) \
+ case Type::CLASS: \
+ llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \
+ return;
+#define TYPE(CLASS, PARENT) \
+ case Type::CLASS: \
+ mangleType(static_cast<const CLASS##Type*>(T.getTypePtr())); \
+ break;
+#include "clang/AST/TypeNodes.def"
+ }
+ }
+
+ // Add the substitution.
+ if (IsSubstitutable)
+ addSubstitution(T);
+}
+
+void CXXNameMangler::mangleNameOrStandardSubstitution(const NamedDecl *ND) {
+ if (!mangleStandardSubstitution(ND))
+ mangleName(ND);
+}
+
+void CXXNameMangler::mangleType(const BuiltinType *T) {
+ // <type> ::= <builtin-type>
+ // <builtin-type> ::= v # void
+ // ::= w # wchar_t
+ // ::= b # bool
+ // ::= c # char
+ // ::= a # signed char
+ // ::= h # unsigned char
+ // ::= s # short
+ // ::= t # unsigned short
+ // ::= i # int
+ // ::= j # unsigned int
+ // ::= l # long
+ // ::= m # unsigned long
+ // ::= x # long long, __int64
+ // ::= y # unsigned long long, __int64
+ // ::= n # __int128
+ // UNSUPPORTED: ::= o # unsigned __int128
+ // ::= f # float
+ // ::= d # double
+ // ::= e # long double, __float80
+ // UNSUPPORTED: ::= g # __float128
+ // UNSUPPORTED: ::= Dd # IEEE 754r decimal floating point (64 bits)
+ // UNSUPPORTED: ::= De # IEEE 754r decimal floating point (128 bits)
+ // UNSUPPORTED: ::= Df # IEEE 754r decimal floating point (32 bits)
+ // UNSUPPORTED: ::= Dh # IEEE 754r half-precision floating point (16 bits)
+ // ::= Di # char32_t
+ // ::= Ds # char16_t
+ // ::= u <source-name> # vendor extended type
+ // From our point of view, std::nullptr_t is a builtin, but as far as mangling
+ // is concerned, it's a type called std::nullptr_t.
+ switch (T->getKind()) {
+ case BuiltinType::Void: Out << 'v'; break;
+ case BuiltinType::Bool: Out << 'b'; break;
+ case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'c'; break;
+ case BuiltinType::UChar: Out << 'h'; break;
+ case BuiltinType::UShort: Out << 't'; break;
+ case BuiltinType::UInt: Out << 'j'; break;
+ case BuiltinType::ULong: Out << 'm'; break;
+ case BuiltinType::ULongLong: Out << 'y'; break;
+ case BuiltinType::UInt128: Out << 'o'; break;
+ case BuiltinType::SChar: Out << 'a'; break;
+ case BuiltinType::WChar: Out << 'w'; break;
+ case BuiltinType::Char16: Out << "Ds"; break;
+ case BuiltinType::Char32: Out << "Di"; break;
+ case BuiltinType::Short: Out << 's'; break;
+ case BuiltinType::Int: Out << 'i'; break;
+ case BuiltinType::Long: Out << 'l'; break;
+ case BuiltinType::LongLong: Out << 'x'; break;
+ case BuiltinType::Int128: Out << 'n'; break;
+ case BuiltinType::Float: Out << 'f'; break;
+ case BuiltinType::Double: Out << 'd'; break;
+ case BuiltinType::LongDouble: Out << 'e'; break;
+ case BuiltinType::NullPtr: Out << "St9nullptr_t"; break;
+
+ case BuiltinType::Overload:
+ case BuiltinType::Dependent:
+ assert(false &&
+ "Overloaded and dependent types shouldn't get to name mangling");
+ break;
+ case BuiltinType::UndeducedAuto:
+ assert(0 && "Should not see undeduced auto here");
+ break;
+ case BuiltinType::ObjCId: Out << "11objc_object"; break;
+ case BuiltinType::ObjCClass: Out << "10objc_class"; break;
+ case BuiltinType::ObjCSel: Out << "13objc_selector"; break;
+ }
+}
+
+// <type> ::= <function-type>
+// <function-type> ::= F [Y] <bare-function-type> E
+void CXXNameMangler::mangleType(const FunctionProtoType *T) {
+ Out << 'F';
+ // FIXME: We don't have enough information in the AST to produce the 'Y'
+ // encoding for extern "C" function types.
+ mangleBareFunctionType(T, /*MangleReturnType=*/true);
+ Out << 'E';
+}
+void CXXNameMangler::mangleType(const FunctionNoProtoType *T) {
+ llvm_unreachable("Can't mangle K&R function prototypes");
+}
+void CXXNameMangler::mangleBareFunctionType(const FunctionType *T,
+ bool MangleReturnType) {
+ // We should never be mangling something without a prototype.
+ const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
+
+ // <bare-function-type> ::= <signature type>+
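+ // For example, 'void f()' has the bare signature "v", while
+ // 'void g(int, ...)' has "iz".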
+ if (MangleReturnType)
+ mangleType(Proto->getResultType());
+
+ if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) {
+ // <builtin-type> ::= v # void
+ Out << 'v';
+ return;
+ }
+
+ for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
+ ArgEnd = Proto->arg_type_end();
+ Arg != ArgEnd; ++Arg)
+ mangleType(*Arg);
+
+ // <builtin-type> ::= z # ellipsis
+ if (Proto->isVariadic())
+ Out << 'z';
+}
+
+// <type> ::= <class-enum-type>
+// <class-enum-type> ::= <name>
+void CXXNameMangler::mangleType(const UnresolvedUsingType *T) {
+ mangleName(T->getDecl());
+}
+
+// <type> ::= <class-enum-type>
+// <class-enum-type> ::= <name>
+void CXXNameMangler::mangleType(const EnumType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void CXXNameMangler::mangleType(const RecordType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void CXXNameMangler::mangleType(const TagType *T) {
+ mangleName(T->getDecl());
+}
+
+// <type> ::= <array-type>
+// <array-type> ::= A <positive dimension number> _ <element type>
+// ::= A [<dimension expression>] _ <element type>
+void CXXNameMangler::mangleType(const ConstantArrayType *T) {
+ Out << 'A' << T->getSize() << '_';
+ mangleType(T->getElementType());
+}
+void CXXNameMangler::mangleType(const VariableArrayType *T) {
+ Out << 'A';
+ mangleExpression(T->getSizeExpr());
+ Out << '_';
+ mangleType(T->getElementType());
+}
+void CXXNameMangler::mangleType(const DependentSizedArrayType *T) {
+ Out << 'A';
+ mangleExpression(T->getSizeExpr());
+ Out << '_';
+ mangleType(T->getElementType());
+}
+void CXXNameMangler::mangleType(const IncompleteArrayType *T) {
+ Out << 'A' << '_';
+ mangleType(T->getElementType());
+}
+
+// <type> ::= <pointer-to-member-type>
+// <pointer-to-member-type> ::= M <class type> <member type>
+void CXXNameMangler::mangleType(const MemberPointerType *T) {
+ Out << 'M';
+ mangleType(QualType(T->getClass(), 0));
+ QualType PointeeType = T->getPointeeType();
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
+ mangleQualifiers(Qualifiers::fromCVRMask(FPT->getTypeQuals()));
+ mangleType(FPT);
+
+ // Itanium C++ ABI 5.1.8:
+ //
+ // The type of a non-static member function is considered to be different,
+ // for the purposes of substitution, from the type of a namespace-scope or
+ // static member function whose type appears similar. The types of two
+ // non-static member functions are considered to be different, for the
+ // purposes of substitution, if the functions are members of different
+ // classes. In other words, for the purposes of substitution, the class of
+ // which the function is a member is considered part of the type of
+ // function.
+
+ // We increment the SeqID here to emulate adding an entry to the
+ // substitution table. We can't actually add it because we don't want this
+ // particular function type to be substituted.
+ ++SeqID;
+ } else
+ mangleType(PointeeType);
+}
+
+// <type> ::= <template-param>
+void CXXNameMangler::mangleType(const TemplateTypeParmType *T) {
+ mangleTemplateParameter(T->getIndex());
+}
+
+// <type> ::= P <type> # pointer-to
+void CXXNameMangler::mangleType(const PointerType *T) {
+ Out << 'P';
+ mangleType(T->getPointeeType());
+}
+void CXXNameMangler::mangleType(const ObjCObjectPointerType *T) {
+ Out << 'P';
+ mangleType(T->getPointeeType());
+}
+
+// <type> ::= R <type> # reference-to
+void CXXNameMangler::mangleType(const LValueReferenceType *T) {
+ Out << 'R';
+ mangleType(T->getPointeeType());
+}
+
+// <type> ::= O <type> # rvalue reference-to (C++0x)
+void CXXNameMangler::mangleType(const RValueReferenceType *T) {
+ Out << 'O';
+ mangleType(T->getPointeeType());
+}
+
+// <type> ::= C <type> # complex pair (C 2000)
+void CXXNameMangler::mangleType(const ComplexType *T) {
+ Out << 'C';
+ mangleType(T->getElementType());
+}
+
+// GNU extension: vector types
+// <type> ::= <vector-type>
+// <vector-type> ::= Dv <positive dimension number> _
+// <extended element type>
+// ::= Dv [<dimension expression>] _ <element type>
+// <extended element type> ::= <element type>
+// ::= p # AltiVec vector pixel
+void CXXNameMangler::mangleType(const VectorType *T) {
+ Out << "Dv" << T->getNumElements() << '_';
+ if (T->getAltiVecSpecific() == VectorType::Pixel)
+ Out << 'p';
+ else if (T->getAltiVecSpecific() == VectorType::Bool)
+ Out << 'b';
+ else
+ mangleType(T->getElementType());
+}
+void CXXNameMangler::mangleType(const ExtVectorType *T) {
+ mangleType(static_cast<const VectorType*>(T));
+}
+void CXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
+ Out << "Dv";
+ mangleExpression(T->getSizeExpr());
+ Out << '_';
+ mangleType(T->getElementType());
+}
+
+void CXXNameMangler::mangleType(const ObjCInterfaceType *T) {
+ mangleSourceName(T->getDecl()->getIdentifier());
+}
+
+void CXXNameMangler::mangleType(const ObjCObjectType *T) {
+ // We don't allow overloading by different protocol qualification,
+ // so mangling them isn't necessary.
+ mangleType(T->getBaseType());
+}
+
+void CXXNameMangler::mangleType(const BlockPointerType *T) {
+ Out << "U13block_pointer";
+ mangleType(T->getPointeeType());
+}
+
+void CXXNameMangler::mangleType(const InjectedClassNameType *T) {
+ // Mangle injected class name types as if the user had written the
+ // specialization out fully. It may not actually be possible to see
+ // this mangling, though.
+ mangleType(T->getInjectedSpecializationType());
+}
+
+void CXXNameMangler::mangleType(const TemplateSpecializationType *T) {
+ if (TemplateDecl *TD = T->getTemplateName().getAsTemplateDecl()) {
+ mangleName(TD, T->getArgs(), T->getNumArgs());
+ } else {
+ if (mangleSubstitution(QualType(T, 0)))
+ return;
+
+ mangleTemplatePrefix(T->getTemplateName());
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(T->getTemplateName(), T->getArgs(), T->getNumArgs());
+ addSubstitution(QualType(T, 0));
+ }
+}
+
+void CXXNameMangler::mangleType(const DependentNameType *T) {
+ // Typename types are always nested
+ Out << 'N';
+ mangleUnresolvedScope(T->getQualifier());
+ mangleSourceName(T->getIdentifier());
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleType(const DependentTemplateSpecializationType *T) {
+ // Dependently-scoped template types are always nested
+ Out << 'N';
+
+ // TODO: avoid making this TemplateName.
+ TemplateName Prefix =
+ getASTContext().getDependentTemplateName(T->getQualifier(),
+ T->getIdentifier());
+ mangleTemplatePrefix(Prefix);
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(Prefix, T->getArgs(), T->getNumArgs());
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleType(const TypeOfType *T) {
+ // FIXME: this is pretty unsatisfactory, but there isn't an obvious
+ // "extension with parameters" mangling.
+ Out << "u6typeof";
+}
+
+void CXXNameMangler::mangleType(const TypeOfExprType *T) {
+ // FIXME: this is pretty unsatisfactory, but there isn't an obvious
+ // "extension with parameters" mangling.
+ Out << "u6typeof";
+}
+
+void CXXNameMangler::mangleType(const DecltypeType *T) {
+ Expr *E = T->getUnderlyingExpr();
+
+ // type ::= Dt <expression> E # decltype of an id-expression
+ // # or class member access
+ // ::= DT <expression> E # decltype of an expression
+
+ // This purports to be an exhaustive list of id-expressions and
+ // class member accesses. Note that we do not ignore parentheses;
+ // parentheses change the semantics of decltype for these
+ // expressions (and cause the mangler to use the other form).
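+ // For example, 'decltype(x)' where x names a variable uses the Dt...E form,
+ // while 'decltype(x + y)' uses DT...E.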
+ if (isa<DeclRefExpr>(E) ||
+ isa<MemberExpr>(E) ||
+ isa<UnresolvedLookupExpr>(E) ||
+ isa<DependentScopeDeclRefExpr>(E) ||
+ isa<CXXDependentScopeMemberExpr>(E) ||
+ isa<UnresolvedMemberExpr>(E))
+ Out << "Dt";
+ else
+ Out << "DT";
+ mangleExpression(E);
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleIntegerLiteral(QualType T,
+ const llvm::APSInt &Value) {
+ // <expr-primary> ::= L <type> <value number> E # integer literal
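+ // For example, the int literal 5 is mangled "Li5E" and 'true' is "Lb1E".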
+ Out << 'L';
+
+ mangleType(T);
+ if (T->isBooleanType()) {
+ // Boolean values are encoded as 0/1.
+ Out << (Value.getBoolValue() ? '1' : '0');
+ } else {
+ mangleNumber(Value);
+ }
+ Out << 'E';
+
+}
+
+/// Mangles a member expression. Implicit accesses are not handled,
+/// but that should be okay, because you shouldn't be able to
+/// make an implicit access in a function template declaration.
+void CXXNameMangler::mangleMemberExpr(const Expr *Base,
+ bool IsArrow,
+ NestedNameSpecifier *Qualifier,
+ DeclarationName Member,
+ unsigned Arity) {
+ // gcc-4.4 uses 'dt' for dot expressions, which is reasonable.
+ // OTOH, gcc also mangles the name as an expression.
+ Out << (IsArrow ? "pt" : "dt");
+ mangleExpression(Base);
+ mangleUnresolvedName(Qualifier, Member, Arity);
+}
+
+void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
+ // <expression> ::= <unary operator-name> <expression>
+ // ::= <binary operator-name> <expression> <expression>
+ // ::= <trinary operator-name> <expression> <expression> <expression>
+ // ::= cl <expression>* E # call
+ // ::= cv <type> expression # conversion with one argument
+ // ::= cv <type> _ <expression>* E # conversion with a different number of arguments
+ // ::= st <type> # sizeof (a type)
+ // ::= at <type> # alignof (a type)
+ // ::= <template-param>
+ // ::= <function-param>
+ // ::= sr <type> <unqualified-name> # dependent name
+ // ::= sr <type> <unqualified-name> <template-args> # dependent template-id
+ // ::= sZ <template-param> # size of a parameter pack
+ // ::= <expr-primary>
+ // <expr-primary> ::= L <type> <value number> E # integer literal
+ // ::= L <type> <value float> E # floating literal
+ // ::= L <mangled-name> E # external name
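+ //
+ // For example, 'sizeof(T)' with T the first template parameter mangles as
+ // "stT_", and a call 'f(x)' mangles as "cl" <callee> <args> 'E'.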
+ switch (E->getStmtClass()) {
+ case Expr::NoStmtClass:
+#define EXPR(Type, Base)
+#define STMT(Type, Base) \
+ case Expr::Type##Class:
+#include "clang/AST/StmtNodes.inc"
+ // fallthrough
+
+ // These all can only appear in local or variable-initialization
+ // contexts and so should never appear in a mangling.
+ case Expr::AddrLabelExprClass:
+ case Expr::BlockDeclRefExprClass:
+ case Expr::CXXThisExprClass:
+ case Expr::DesignatedInitExprClass:
+ case Expr::ImplicitValueInitExprClass:
+ case Expr::InitListExprClass:
+ case Expr::ParenListExprClass:
+ case Expr::CXXScalarValueInitExprClass:
+ llvm_unreachable("unexpected statement kind");
+ break;
+
+ // FIXME: invent manglings for all these.
+ case Expr::BlockExprClass:
+ case Expr::CXXPseudoDestructorExprClass:
+ case Expr::ChooseExprClass:
+ case Expr::CompoundLiteralExprClass:
+ case Expr::ExtVectorElementExprClass:
+ case Expr::ObjCEncodeExprClass:
+ case Expr::ObjCImplicitSetterGetterRefExprClass:
+ case Expr::ObjCIsaExprClass:
+ case Expr::ObjCIvarRefExprClass:
+ case Expr::ObjCMessageExprClass:
+ case Expr::ObjCPropertyRefExprClass:
+ case Expr::ObjCProtocolExprClass:
+ case Expr::ObjCSelectorExprClass:
+ case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCSuperExprClass:
+ case Expr::OffsetOfExprClass:
+ case Expr::PredefinedExprClass:
+ case Expr::ShuffleVectorExprClass:
+ case Expr::StmtExprClass:
+ case Expr::TypesCompatibleExprClass:
+ case Expr::UnaryTypeTraitExprClass:
+ case Expr::VAArgExprClass: {
+ // As bad as this diagnostic is, it's better than crashing.
+ Diagnostic &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
+ "cannot yet mangle expression type %0");
+ Diags.Report(FullSourceLoc(E->getExprLoc(),
+ getASTContext().getSourceManager()),
+ DiagID)
+ << E->getStmtClassName() << E->getSourceRange();
+ break;
+ }
+
+ case Expr::CXXDefaultArgExprClass:
+ mangleExpression(cast<CXXDefaultArgExpr>(E)->getExpr(), Arity);
+ break;
+
+ case Expr::CXXMemberCallExprClass: // fallthrough
+ case Expr::CallExprClass: {
+ const CallExpr *CE = cast<CallExpr>(E);
+ Out << "cl";
+ mangleExpression(CE->getCallee(), CE->getNumArgs());
+ for (unsigned I = 0, N = CE->getNumArgs(); I != N; ++I)
+ mangleExpression(CE->getArg(I));
+ Out << 'E';
+ break;
+ }
+
+ case Expr::CXXNewExprClass: {
+ // Proposal from David Vandervoorde, 2010.06.30
+ const CXXNewExpr *New = cast<CXXNewExpr>(E);
+ if (New->isGlobalNew()) Out << "gs";
+ Out << (New->isArray() ? "na" : "nw");
+ for (CXXNewExpr::const_arg_iterator I = New->placement_arg_begin(),
+ E = New->placement_arg_end(); I != E; ++I)
+ mangleExpression(*I);
+ Out << '_';
+ mangleType(New->getAllocatedType());
+ if (New->hasInitializer()) {
+ Out << "pi";
+ for (CXXNewExpr::const_arg_iterator I = New->constructor_arg_begin(),
+ E = New->constructor_arg_end(); I != E; ++I)
+ mangleExpression(*I);
+ }
+ Out << 'E';
+ break;
+ }
+
+ case Expr::MemberExprClass: {
+ const MemberExpr *ME = cast<MemberExpr>(E);
+ mangleMemberExpr(ME->getBase(), ME->isArrow(),
+ ME->getQualifier(), ME->getMemberDecl()->getDeclName(),
+ Arity);
+ break;
+ }
+
+ case Expr::UnresolvedMemberExprClass: {
+ const UnresolvedMemberExpr *ME = cast<UnresolvedMemberExpr>(E);
+ mangleMemberExpr(ME->getBase(), ME->isArrow(),
+ ME->getQualifier(), ME->getMemberName(),
+ Arity);
+ if (ME->hasExplicitTemplateArgs())
+ mangleTemplateArgs(ME->getExplicitTemplateArgs());
+ break;
+ }
+
+ case Expr::CXXDependentScopeMemberExprClass: {
+ const CXXDependentScopeMemberExpr *ME
+ = cast<CXXDependentScopeMemberExpr>(E);
+ mangleMemberExpr(ME->getBase(), ME->isArrow(),
+ ME->getQualifier(), ME->getMember(),
+ Arity);
+ if (ME->hasExplicitTemplateArgs())
+ mangleTemplateArgs(ME->getExplicitTemplateArgs());
+ break;
+ }
+
+ case Expr::UnresolvedLookupExprClass: {
+ // The ABI doesn't cover how to mangle overload sets, so we mangle
+ // using something as close as possible to the original lookup
+ // expression.
+ const UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(E);
+ mangleUnresolvedName(ULE->getQualifier(), ULE->getName(), Arity);
+ if (ULE->hasExplicitTemplateArgs())
+ mangleTemplateArgs(ULE->getExplicitTemplateArgs());
+ break;
+ }
+
+ case Expr::CXXUnresolvedConstructExprClass: {
+ const CXXUnresolvedConstructExpr *CE = cast<CXXUnresolvedConstructExpr>(E);
+ unsigned N = CE->arg_size();
+
+ Out << "cv";
+ mangleType(CE->getType());
+ if (N != 1) Out << '_';
+ for (unsigned I = 0; I != N; ++I) mangleExpression(CE->getArg(I));
+ if (N != 1) Out << 'E';
+ break;
+ }
+
+ case Expr::CXXTemporaryObjectExprClass:
+ case Expr::CXXConstructExprClass: {
+ const CXXConstructExpr *CE = cast<CXXConstructExpr>(E);
+ unsigned N = CE->getNumArgs();
+
+ Out << "cv";
+ mangleType(CE->getType());
+ if (N != 1) Out << '_';
+ for (unsigned I = 0; I != N; ++I) mangleExpression(CE->getArg(I));
+ if (N != 1) Out << 'E';
+ break;
+ }
+
+ case Expr::SizeOfAlignOfExprClass: {
+ const SizeOfAlignOfExpr *SAE = cast<SizeOfAlignOfExpr>(E);
+ if (SAE->isSizeOf()) Out << 's';
+ else Out << 'a';
+ if (SAE->isArgumentType()) {
+ Out << 't';
+ mangleType(SAE->getArgumentType());
+ } else {
+ Out << 'z';
+ mangleExpression(SAE->getArgumentExpr());
+ }
+ break;
+ }
+
+ case Expr::CXXThrowExprClass: {
+ const CXXThrowExpr *TE = cast<CXXThrowExpr>(E);
+
+ // Proposal from David Vandervoorde, 2010.06.30
+ if (TE->getSubExpr()) {
+ Out << "tw";
+ mangleExpression(TE->getSubExpr());
+ } else {
+ Out << "tr";
+ }
+ break;
+ }
+
+ case Expr::CXXTypeidExprClass: {
+ const CXXTypeidExpr *TIE = cast<CXXTypeidExpr>(E);
+
+ // Proposal from David Vandervoorde, 2010.06.30
+ if (TIE->isTypeOperand()) {
+ Out << "ti";
+ mangleType(TIE->getTypeOperand());
+ } else {
+ Out << "te";
+ mangleExpression(TIE->getExprOperand());
+ }
+ break;
+ }
+
+ case Expr::CXXDeleteExprClass: {
+ const CXXDeleteExpr *DE = cast<CXXDeleteExpr>(E);
+
+ // Proposal from David Vandervoorde, 2010.06.30
+ if (DE->isGlobalDelete()) Out << "gs";
+ Out << (DE->isArrayForm() ? "da" : "dl");
+ mangleExpression(DE->getArgument());
+ break;
+ }
+
+ case Expr::UnaryOperatorClass: {
+ const UnaryOperator *UO = cast<UnaryOperator>(E);
+ mangleOperatorName(UnaryOperator::getOverloadedOperator(UO->getOpcode()),
+ /*Arity=*/1);
+ mangleExpression(UO->getSubExpr());
+ break;
+ }
+
+ case Expr::ArraySubscriptExprClass: {
+ const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(E);
+
+ // Array subscript is treated as a syntactically weird form of
+ // binary operator.
+ Out << "ix";
+ mangleExpression(AE->getLHS());
+ mangleExpression(AE->getRHS());
+ break;
+ }
+
+ case Expr::CompoundAssignOperatorClass: // fallthrough
+ case Expr::BinaryOperatorClass: {
+ const BinaryOperator *BO = cast<BinaryOperator>(E);
+ mangleOperatorName(BinaryOperator::getOverloadedOperator(BO->getOpcode()),
+ /*Arity=*/2);
+ mangleExpression(BO->getLHS());
+ mangleExpression(BO->getRHS());
+ break;
+ }
+
+ case Expr::ConditionalOperatorClass: {
+ const ConditionalOperator *CO = cast<ConditionalOperator>(E);
+ mangleOperatorName(OO_Conditional, /*Arity=*/3);
+ mangleExpression(CO->getCond());
+ mangleExpression(CO->getLHS(), Arity);
+ mangleExpression(CO->getRHS(), Arity);
+ break;
+ }
+
+ case Expr::ImplicitCastExprClass: {
+ mangleExpression(cast<ImplicitCastExpr>(E)->getSubExpr(), Arity);
+ break;
+ }
+
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXStaticCastExprClass:
+ case Expr::CXXDynamicCastExprClass:
+ case Expr::CXXReinterpretCastExprClass:
+ case Expr::CXXConstCastExprClass:
+ case Expr::CXXFunctionalCastExprClass: {
+ const ExplicitCastExpr *ECE = cast<ExplicitCastExpr>(E);
+ Out << "cv";
+ mangleType(ECE->getType());
+ mangleExpression(ECE->getSubExpr());
+ break;
+ }
+
+ case Expr::CXXOperatorCallExprClass: {
+ const CXXOperatorCallExpr *CE = cast<CXXOperatorCallExpr>(E);
+ unsigned NumArgs = CE->getNumArgs();
+ mangleOperatorName(CE->getOperator(), /*Arity=*/NumArgs);
+ // Mangle the arguments.
+ for (unsigned i = 0; i != NumArgs; ++i)
+ mangleExpression(CE->getArg(i));
+ break;
+ }
+
+ case Expr::ParenExprClass:
+ mangleExpression(cast<ParenExpr>(E)->getSubExpr(), Arity);
+ break;
+
+ case Expr::DeclRefExprClass: {
+ const NamedDecl *D = cast<DeclRefExpr>(E)->getDecl();
+
+ switch (D->getKind()) {
+ default:
+ // <expr-primary> ::= L <mangled-name> E # external name
+ Out << 'L';
+ mangle(D, "_Z");
+ Out << 'E';
+ break;
+
+ case Decl::EnumConstant: {
+ const EnumConstantDecl *ED = cast<EnumConstantDecl>(D);
+ mangleIntegerLiteral(ED->getType(), ED->getInitVal());
+ break;
+ }
+
+ case Decl::NonTypeTemplateParm: {
+ const NonTypeTemplateParmDecl *PD = cast<NonTypeTemplateParmDecl>(D);
+ mangleTemplateParameter(PD->getIndex());
+ break;
+ }
+
+ }
+
+ break;
+ }
+
+ case Expr::DependentScopeDeclRefExprClass: {
+ const DependentScopeDeclRefExpr *DRE = cast<DependentScopeDeclRefExpr>(E);
+ NestedNameSpecifier *NNS = DRE->getQualifier();
+ const Type *QTy = NNS->getAsType();
+
+ // When we're dealing with a nested-name-specifier that has just a
+ // dependent identifier in it, mangle that as a typename. FIXME:
+ // It isn't clear that we ever actually want to have such a
+ // nested-name-specifier; why not just represent it as a typename type?
+ if (!QTy && NNS->getAsIdentifier() && NNS->getPrefix()) {
+ QTy = getASTContext().getDependentNameType(ETK_Typename,
+ NNS->getPrefix(),
+ NNS->getAsIdentifier())
+ .getTypePtr();
+ }
+ assert(QTy && "Qualifier was not type!");
+
+ // ::= sr <type> <unqualified-name> # dependent name
+ // ::= sr <type> <unqualified-name> <template-args> # dependent template-id
+ Out << "sr";
+ mangleType(QualType(QTy, 0));
+ mangleUnqualifiedName(0, DRE->getDeclName(), Arity);
+ if (DRE->hasExplicitTemplateArgs())
+ mangleTemplateArgs(DRE->getExplicitTemplateArgs());
+
+ break;
+ }
+
+ case Expr::CXXBindTemporaryExprClass:
+ mangleExpression(cast<CXXBindTemporaryExpr>(E)->getSubExpr());
+ break;
+
+ case Expr::CXXExprWithTemporariesClass:
+ mangleExpression(cast<CXXExprWithTemporaries>(E)->getSubExpr(), Arity);
+ break;
+
+ case Expr::FloatingLiteralClass: {
+ const FloatingLiteral *FL = cast<FloatingLiteral>(E);
+ Out << 'L';
+ mangleType(FL->getType());
+ mangleFloat(FL->getValue());
+ Out << 'E';
+ break;
+ }
+
+ case Expr::CharacterLiteralClass:
+ Out << 'L';
+ mangleType(E->getType());
+ Out << cast<CharacterLiteral>(E)->getValue();
+ Out << 'E';
+ break;
+
+ case Expr::CXXBoolLiteralExprClass:
+ Out << "Lb";
+ Out << (cast<CXXBoolLiteralExpr>(E)->getValue() ? '1' : '0');
+ Out << 'E';
+ break;
+
+ case Expr::IntegerLiteralClass: {
+ llvm::APSInt Value(cast<IntegerLiteral>(E)->getValue());
+ if (E->getType()->isSignedIntegerType())
+ Value.setIsSigned(true);
+ mangleIntegerLiteral(E->getType(), Value);
+ break;
+ }
+
+ case Expr::ImaginaryLiteralClass: {
+ const ImaginaryLiteral *IE = cast<ImaginaryLiteral>(E);
+ // Mangle as if a complex literal.
+ // Proposal from David Vandervoorde, 2010.06.30.
+ Out << 'L';
+ mangleType(E->getType());
+ if (const FloatingLiteral *Imag =
+ dyn_cast<FloatingLiteral>(IE->getSubExpr())) {
+ // Mangle a floating-point zero of the appropriate type.
+ mangleFloat(llvm::APFloat(Imag->getValue().getSemantics()));
+ Out << '_';
+ mangleFloat(Imag->getValue());
+ } else {
+ Out << '0' << '_';
+ llvm::APSInt Value(cast<IntegerLiteral>(IE->getSubExpr())->getValue());
+ if (IE->getSubExpr()->getType()->isSignedIntegerType())
+ Value.setIsSigned(true);
+ mangleNumber(Value);
+ }
+ Out << 'E';
+ break;
+ }
+
+ case Expr::StringLiteralClass: {
+ // Revised proposal from David Vandervoorde, 2010.07.15.
+ Out << 'L';
+ assert(isa<ConstantArrayType>(E->getType()));
+ mangleType(E->getType());
+ Out << 'E';
+ break;
+ }
+
+ case Expr::GNUNullExprClass:
+ // FIXME: should this really be mangled the same as nullptr?
+ // fallthrough
+
+ case Expr::CXXNullPtrLiteralExprClass: {
+ // Proposal from David Vandervoorde, 2010.06.30, as
+ // modified by ABI list discussion.
+ Out << "LDnE";
+ break;
+ }
+
+ }
+}
+
+void CXXNameMangler::mangleCXXCtorType(CXXCtorType T) {
+ // <ctor-dtor-name> ::= C1 # complete object constructor
+ // ::= C2 # base object constructor
+ // ::= C3 # complete object allocating constructor
+ //
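+ // For example, for 'struct S { S(); };' the complete-object constructor
+ // mangles as _ZN1SC1Ev and the base-object constructor as _ZN1SC2Ev; the
+ // allocating constructor (C3) is rarely emitted in practice.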
+ switch (T) {
+ case Ctor_Complete:
+ Out << "C1";
+ break;
+ case Ctor_Base:
+ Out << "C2";
+ break;
+ case Ctor_CompleteAllocating:
+ Out << "C3";
+ break;
+ }
+}
+
+void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
+ // <ctor-dtor-name> ::= D0 # deleting destructor
+ // ::= D1 # complete object destructor
+ // ::= D2 # base object destructor
+ //
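+ // For example, for 'struct S { virtual ~S(); };' the deleting destructor
+ // mangles as _ZN1SD0Ev, the complete-object destructor as _ZN1SD1Ev and
+ // the base-object destructor as _ZN1SD2Ev.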
+ switch (T) {
+ case Dtor_Deleting:
+ Out << "D0";
+ break;
+ case Dtor_Complete:
+ Out << "D1";
+ break;
+ case Dtor_Base:
+ Out << "D2";
+ break;
+ }
+}
+
+void CXXNameMangler::mangleTemplateArgs(
+ const ExplicitTemplateArgumentList &TemplateArgs) {
+ // <template-args> ::= I <template-arg>+ E
+ Out << 'I';
+ for (unsigned I = 0, E = TemplateArgs.NumTemplateArgs; I != E; ++I)
+ mangleTemplateArg(0, TemplateArgs.getTemplateArgs()[I].getArgument());
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleTemplateArgs(TemplateName Template,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleTemplateArgs(*TD->getTemplateParameters(), TemplateArgs,
+ NumTemplateArgs);
+
+ // <template-args> ::= I <template-arg>+ E
+ Out << 'I';
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ mangleTemplateArg(0, TemplateArgs[i]);
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleTemplateArgs(const TemplateParameterList &PL,
+ const TemplateArgumentList &AL) {
+ // <template-args> ::= I <template-arg>+ E
+ Out << 'I';
+ for (unsigned i = 0, e = AL.size(); i != e; ++i)
+ mangleTemplateArg(PL.getParam(i), AL[i]);
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleTemplateArgs(const TemplateParameterList &PL,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ // <template-args> ::= I <template-arg>+ E
+ Out << 'I';
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ mangleTemplateArg(PL.getParam(i), TemplateArgs[i]);
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleTemplateArg(const NamedDecl *P,
+ const TemplateArgument &A) {
+ // <template-arg> ::= <type> # type or template
+ // ::= X <expression> E # expression
+ // ::= <expr-primary> # simple expressions
+ // ::= I <template-arg>* E # argument pack
+ // ::= sp <expression> # pack expansion of (C++0x)
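+ // For illustration: a type argument 'int' mangles as 'i', an integral
+ // argument 5 of type int as the <expr-primary> 'Li5E', and a dependent
+ // expression argument is wrapped as 'X <expression> E'.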
+ switch (A.getKind()) {
+ default:
+ assert(0 && "Unknown template argument kind!");
+ case TemplateArgument::Type:
+ mangleType(A.getAsType());
+ break;
+ case TemplateArgument::Template:
+ // This is mangled as <type>.
+ mangleType(A.getAsTemplate());
+ break;
+ case TemplateArgument::Expression:
+ Out << 'X';
+ mangleExpression(A.getAsExpr());
+ Out << 'E';
+ break;
+ case TemplateArgument::Integral:
+ mangleIntegerLiteral(A.getIntegralType(), *A.getAsIntegral());
+ break;
+ case TemplateArgument::Declaration: {
+ assert(P && "Missing template parameter for declaration argument");
+ // <expr-primary> ::= L <mangled-name> E # external name
+
+ // Clang produces ASTs where pointer-to-member-function expressions
+ // and pointer-to-function expressions are represented as a declaration,
+ // not an expression. We compensate for that here to produce the correct
+ // mangling.
+ NamedDecl *D = cast<NamedDecl>(A.getAsDecl());
+ const NonTypeTemplateParmDecl *Parameter = cast<NonTypeTemplateParmDecl>(P);
+ bool compensateMangling = D->isCXXClassMember() &&
+ !Parameter->getType()->isReferenceType();
+ if (compensateMangling) {
+ Out << 'X';
+ mangleOperatorName(OO_Amp, 1);
+ }
+
+ Out << 'L';
+ // References to external entities use the mangled name; if the name would
+ // not normally be mangled, mangle it as unqualified.
+ //
+ // FIXME: The ABI specifies that external names here should have _Z, but
+ // gcc leaves this off.
+ if (compensateMangling)
+ mangle(D, "_Z");
+ else
+ mangle(D, "Z");
+ Out << 'E';
+
+ if (compensateMangling)
+ Out << 'E';
+
+ break;
+ }
+ }
+}
+
+void CXXNameMangler::mangleTemplateParameter(unsigned Index) {
+ // <template-param> ::= T_ # first template parameter
+ // ::= T <parameter-2 non-negative number> _
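+ // e.g. the first template parameter mangles as T_, the second as T0_,
+ // the third as T1_, and so on.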
+ if (Index == 0)
+ Out << "T_";
+ else
+ Out << 'T' << (Index - 1) << '_';
+}
+
+// <substitution> ::= S <seq-id> _
+// ::= S_
+bool CXXNameMangler::mangleSubstitution(const NamedDecl *ND) {
+ // Try one of the standard substitutions first.
+ if (mangleStandardSubstitution(ND))
+ return true;
+
+ ND = cast<NamedDecl>(ND->getCanonicalDecl());
+ return mangleSubstitution(reinterpret_cast<uintptr_t>(ND));
+}
+
+bool CXXNameMangler::mangleSubstitution(QualType T) {
+ if (!T.getCVRQualifiers()) {
+ if (const RecordType *RT = T->getAs<RecordType>())
+ return mangleSubstitution(RT->getDecl());
+ }
+
+ uintptr_t TypePtr = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
+
+ return mangleSubstitution(TypePtr);
+}
+
+bool CXXNameMangler::mangleSubstitution(TemplateName Template) {
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleSubstitution(TD);
+
+ Template = Context.getASTContext().getCanonicalTemplateName(Template);
+ return mangleSubstitution(
+ reinterpret_cast<uintptr_t>(Template.getAsVoidPointer()));
+}
+
+bool CXXNameMangler::mangleSubstitution(uintptr_t Ptr) {
+ llvm::DenseMap<uintptr_t, unsigned>::iterator I = Substitutions.find(Ptr);
+ if (I == Substitutions.end())
+ return false;
+
+ unsigned SeqID = I->second;
+ if (SeqID == 0)
+ Out << "S_";
+ else {
+ SeqID--;
+
+ // <seq-id> is encoded in base-36, using digits and upper case letters.
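+ // For illustration: the first substitution overall is S_, the second S0_,
+ // the eleventh S9_, the twelfth SA_, and the 38th wraps around to S10_.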
+ char Buffer[10];
+ char *BufferPtr = llvm::array_endof(Buffer);
+
+ if (SeqID == 0) *--BufferPtr = '0';
+
+ while (SeqID) {
+ assert(BufferPtr > Buffer && "Buffer overflow!");
+
+ char c = static_cast<char>(SeqID % 36);
+
+ *--BufferPtr = (c < 10 ? '0' + c : 'A' + c - 10);
+ SeqID /= 36;
+ }
+
+ Out << 'S'
+ << llvm::StringRef(BufferPtr, llvm::array_endof(Buffer)-BufferPtr)
+ << '_';
+ }
+
+ return true;
+}
+
+static bool isCharType(QualType T) {
+ if (T.isNull())
+ return false;
+
+ return T->isSpecificBuiltinType(BuiltinType::Char_S) ||
+ T->isSpecificBuiltinType(BuiltinType::Char_U);
+}
+
+/// isCharSpecialization - Returns whether a given type is a template
+/// specialization of a given name with a single argument of type char.
+static bool isCharSpecialization(QualType T, const char *Name) {
+ if (T.isNull())
+ return false;
+
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ const ClassTemplateSpecializationDecl *SD =
+ dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
+ if (!SD)
+ return false;
+
+ if (!isStdNamespace(SD->getDeclContext()))
+ return false;
+
+ const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
+ if (TemplateArgs.size() != 1)
+ return false;
+
+ if (!isCharType(TemplateArgs[0].getAsType()))
+ return false;
+
+ return SD->getIdentifier()->getName() == Name;
+}
+
+template <std::size_t StrLen>
+bool isStreamCharSpecialization(const ClassTemplateSpecializationDecl *SD,
+ const char (&Str)[StrLen]) {
+ if (!SD->getIdentifier()->isStr(Str))
+ return false;
+
+ const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
+ if (TemplateArgs.size() != 2)
+ return false;
+
+ if (!isCharType(TemplateArgs[0].getAsType()))
+ return false;
+
+ if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits"))
+ return false;
+
+ return true;
+}
+
+bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) {
+ // <substitution> ::= St # ::std::
+ if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
+ if (isStd(NS)) {
+ Out << "St";
+ return true;
+ }
+ }
+
+ if (const ClassTemplateDecl *TD = dyn_cast<ClassTemplateDecl>(ND)) {
+ if (!isStdNamespace(TD->getDeclContext()))
+ return false;
+
+ // <substitution> ::= Sa # ::std::allocator
+ if (TD->getIdentifier()->isStr("allocator")) {
+ Out << "Sa";
+ return true;
+ }
+
+ // <substitution> ::= Sb # ::std::basic_string
+ if (TD->getIdentifier()->isStr("basic_string")) {
+ Out << "Sb";
+ return true;
+ }
+ }
+
+ if (const ClassTemplateSpecializationDecl *SD =
+ dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
+ if (!isStdNamespace(SD->getDeclContext()))
+ return false;
+
+ // <substitution> ::= Ss # ::std::basic_string<char,
+ // ::std::char_traits<char>,
+ // ::std::allocator<char> >
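+ // (So, for example, 'void f(std::string)' should mangle as _Z1fSs.)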
+ if (SD->getIdentifier()->isStr("basic_string")) {
+ const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
+
+ if (TemplateArgs.size() != 3)
+ return false;
+
+ if (!isCharType(TemplateArgs[0].getAsType()))
+ return false;
+
+ if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits"))
+ return false;
+
+ if (!isCharSpecialization(TemplateArgs[2].getAsType(), "allocator"))
+ return false;
+
+ Out << "Ss";
+ return true;
+ }
+
+ // <substitution> ::= Si # ::std::basic_istream<char,
+ // ::std::char_traits<char> >
+ if (isStreamCharSpecialization(SD, "basic_istream")) {
+ Out << "Si";
+ return true;
+ }
+
+ // <substitution> ::= So # ::std::basic_ostream<char,
+ // ::std::char_traits<char> >
+ if (isStreamCharSpecialization(SD, "basic_ostream")) {
+ Out << "So";
+ return true;
+ }
+
+ // <substitution> ::= Sd # ::std::basic_iostream<char,
+ // ::std::char_traits<char> >
+ if (isStreamCharSpecialization(SD, "basic_iostream")) {
+ Out << "Sd";
+ return true;
+ }
+ }
+ return false;
+}
+
+void CXXNameMangler::addSubstitution(QualType T) {
+ if (!T.getCVRQualifiers()) {
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ addSubstitution(RT->getDecl());
+ return;
+ }
+ }
+
+ uintptr_t TypePtr = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
+ addSubstitution(TypePtr);
+}
+
+void CXXNameMangler::addSubstitution(TemplateName Template) {
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return addSubstitution(TD);
+
+ Template = Context.getASTContext().getCanonicalTemplateName(Template);
+ addSubstitution(reinterpret_cast<uintptr_t>(Template.getAsVoidPointer()));
+}
+
+void CXXNameMangler::addSubstitution(uintptr_t Ptr) {
+ assert(!Substitutions.count(Ptr) && "Substitution already exists!");
+ Substitutions[Ptr] = SeqID++;
+}
+
+//
+
+/// \brief Mangles the name of the declaration D and emits that name to the
+/// given output buffer.
+///
+/// The declaration must be a function or a variable. Callers are expected to
+/// check shouldMangleDeclName() first; when no mangling is required, they
+/// should simply emit the identifier of the declaration
+/// (\c D->getIdentifier()) as its name.
+void MangleContext::mangleName(const NamedDecl *D,
+ llvm::SmallVectorImpl<char> &Res) {
+ assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
+ "Invalid mangleName() call, argument is not a variable or function!");
+ assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
+ "Invalid mangleName() call on 'structor decl!");
+
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ getASTContext().getSourceManager(),
+ "Mangling declaration");
+
+ CXXNameMangler Mangler(*this, Res);
+ return Mangler.mangle(D);
+}
+
+void MangleContext::mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ llvm::SmallVectorImpl<char> &Res) {
+ CXXNameMangler Mangler(*this, Res, D, Type);
+ Mangler.mangle(D);
+}
+
+void MangleContext::mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ llvm::SmallVectorImpl<char> &Res) {
+ CXXNameMangler Mangler(*this, Res, D, Type);
+ Mangler.mangle(D);
+}
+
+void MangleContext::mangleBlock(GlobalDecl GD, const BlockDecl *BD,
+ llvm::SmallVectorImpl<char> &Res) {
+ MiscNameMangler Mangler(*this, Res);
+ Mangler.mangleBlock(GD, BD);
+}
+
+void MangleContext::mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ llvm::SmallVectorImpl<char> &Res) {
+ // <special-name> ::= T <call-offset> <base encoding>
+ // # base is the nominal target function of thunk
+ // <special-name> ::= Tc <call-offset> <call-offset> <base encoding>
+ // # base is the nominal target function of thunk
+ // # first call-offset is 'this' adjustment
+ // # second call-offset is result adjustment
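+ // For illustration, a non-virtual thunk that adjusts 'this' by -8 bytes
+ // typically mangles as _ZThn8_<base encoding>, e.g. _ZThn8_N1D1fEv.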
+
+ assert(!isa<CXXDestructorDecl>(MD) &&
+ "Use mangleCXXDtor for destructor decls!");
+
+ CXXNameMangler Mangler(*this, Res);
+ Mangler.getStream() << "_ZT";
+ if (!Thunk.Return.isEmpty())
+ Mangler.getStream() << 'c';
+
+ // Mangle the 'this' pointer adjustment.
+ Mangler.mangleCallOffset(Thunk.This.NonVirtual, Thunk.This.VCallOffsetOffset);
+
+ // Mangle the return pointer adjustment if there is one.
+ if (!Thunk.Return.isEmpty())
+ Mangler.mangleCallOffset(Thunk.Return.NonVirtual,
+ Thunk.Return.VBaseOffsetOffset);
+
+ Mangler.mangleFunctionEncoding(MD);
+}
+
+void
+MangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ llvm::SmallVectorImpl<char> &Res) {
+ // <special-name> ::= T <call-offset> <base encoding>
+ // # base is the nominal target function of thunk
+
+ CXXNameMangler Mangler(*this, Res, DD, Type);
+ Mangler.getStream() << "_ZT";
+
+ // Mangle the 'this' pointer adjustment.
+ Mangler.mangleCallOffset(ThisAdjustment.NonVirtual,
+ ThisAdjustment.VCallOffsetOffset);
+
+ Mangler.mangleFunctionEncoding(DD);
+}
+
+/// mangleGuardVariable - Returns the mangled name for a guard variable
+/// for the passed in VarDecl.
+void MangleContext::mangleGuardVariable(const VarDecl *D,
+ llvm::SmallVectorImpl<char> &Res) {
+ // <special-name> ::= GV <object name> # Guard variable for one-time
+ // # initialization
+ CXXNameMangler Mangler(*this, Res);
+ Mangler.getStream() << "_ZGV";
+ Mangler.mangleName(D);
+}
+
+void MangleContext::mangleReferenceTemporary(const VarDecl *D,
+ llvm::SmallVectorImpl<char> &Res) {
+ // We match the GCC mangling here.
+ // <special-name> ::= GR <object name>
+ CXXNameMangler Mangler(*this, Res);
+ Mangler.getStream() << "_ZGR";
+ Mangler.mangleName(D);
+}
+
+void MangleContext::mangleCXXVTable(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &Res) {
+ // <special-name> ::= TV <type> # virtual table
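+ // e.g. the vtable for a class 'S' mangles as _ZTV1S.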
+ CXXNameMangler Mangler(*this, Res);
+ Mangler.getStream() << "_ZTV";
+ Mangler.mangleNameOrStandardSubstitution(RD);
+}
+
+void MangleContext::mangleCXXVTT(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &Res) {
+ // <special-name> ::= TT <type> # VTT structure
+ CXXNameMangler Mangler(*this, Res);
+ Mangler.getStream() << "_ZTT";
+ Mangler.mangleNameOrStandardSubstitution(RD);
+}
+
+void MangleContext::mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
+ const CXXRecordDecl *Type,
+ llvm::SmallVectorImpl<char> &Res) {
+ // <special-name> ::= TC <type> <offset number> _ <base type>
+ CXXNameMangler Mangler(*this, Res);
+ Mangler.getStream() << "_ZTC";
+ Mangler.mangleNameOrStandardSubstitution(RD);
+ Mangler.getStream() << Offset;
+ Mangler.getStream() << '_';
+ Mangler.mangleNameOrStandardSubstitution(Type);
+}
+
+void MangleContext::mangleCXXRTTI(QualType Ty,
+ llvm::SmallVectorImpl<char> &Res) {
+ // <special-name> ::= TI <type> # typeinfo structure
+ assert(!Ty.hasQualifiers() && "RTTI info cannot have top-level qualifiers");
+ CXXNameMangler Mangler(*this, Res);
+ Mangler.getStream() << "_ZTI";
+ Mangler.mangleType(Ty);
+}
+
+void MangleContext::mangleCXXRTTIName(QualType Ty,
+ llvm::SmallVectorImpl<char> &Res) {
+ // <special-name> ::= TS <type> # typeinfo name (null terminated byte string)
+ CXXNameMangler Mangler(*this, Res);
+ Mangler.getStream() << "_ZTS";
+ Mangler.mangleType(Ty);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/Mangle.h b/contrib/llvm/tools/clang/lib/CodeGen/Mangle.h
new file mode 100644
index 0000000..139f6c0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/Mangle.h
@@ -0,0 +1,177 @@
+//===--- Mangle.h - Mangle C++ Names ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements C++ name mangling according to the Itanium C++ ABI,
+// which is used in GCC 3.2 and newer (and many compilers that are
+// ABI-compatible with GCC):
+//
+// http://www.codesourcery.com/public/cxx-abi/abi.html
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_CODEGEN_MANGLE_H
+#define LLVM_CLANG_CODEGEN_MANGLE_H
+
+#include "CGCXX.h"
+#include "GlobalDecl.h"
+#include "clang/AST/Type.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+ class ASTContext;
+ class BlockDecl;
+ class CXXConstructorDecl;
+ class CXXDestructorDecl;
+ class CXXMethodDecl;
+ class FunctionDecl;
+ class NamedDecl;
+ class ObjCMethodDecl;
+ class VarDecl;
+
+namespace CodeGen {
+ struct ThisAdjustment;
+ struct ThunkInfo;
+
+/// MangleBuffer - a convenient class for storing a name which is
+/// either the result of a mangling or is a constant string with
+/// external memory ownership.
+class MangleBuffer {
+public:
+ void setString(llvm::StringRef Ref) {
+ String = Ref;
+ }
+
+ llvm::SmallVectorImpl<char> &getBuffer() {
+ return Buffer;
+ }
+
+ llvm::StringRef getString() const {
+ if (!String.empty()) return String;
+ return Buffer.str();
+ }
+
+ operator llvm::StringRef() const {
+ return getString();
+ }
+
+private:
+ llvm::StringRef String;
+ llvm::SmallString<256> Buffer;
+};
+
+/// MangleContext - Context for tracking state which persists across multiple
+/// calls to the C++ name mangler.
+class MangleContext {
+ ASTContext &Context;
+ Diagnostic &Diags;
+
+ llvm::DenseMap<const TagDecl *, uint64_t> AnonStructIds;
+ unsigned Discriminator;
+ llvm::DenseMap<const NamedDecl*, unsigned> Uniquifier;
+ llvm::DenseMap<const BlockDecl*, unsigned> GlobalBlockIds;
+ llvm::DenseMap<const BlockDecl*, unsigned> LocalBlockIds;
+
+public:
+ explicit MangleContext(ASTContext &Context,
+ Diagnostic &Diags)
+ : Context(Context), Diags(Diags) { }
+
+ virtual ~MangleContext() { }
+
+ ASTContext &getASTContext() const { return Context; }
+
+ Diagnostic &getDiags() const { return Diags; }
+
+ void startNewFunction() { LocalBlockIds.clear(); }
+
+ uint64_t getAnonymousStructId(const TagDecl *TD) {
+ std::pair<llvm::DenseMap<const TagDecl *,
+ uint64_t>::iterator, bool> Result =
+ AnonStructIds.insert(std::make_pair(TD, AnonStructIds.size()));
+ return Result.first->second;
+ }
+
+ unsigned getBlockId(const BlockDecl *BD, bool Local) {
+ llvm::DenseMap<const BlockDecl *, unsigned> &BlockIds
+ = Local? LocalBlockIds : GlobalBlockIds;
+ std::pair<llvm::DenseMap<const BlockDecl *, unsigned>::iterator, bool>
+ Result = BlockIds.insert(std::make_pair(BD, BlockIds.size()));
+ return Result.first->second;
+ }
+
+ /// @name Mangler Entry Points
+ /// @{
+
+ virtual bool shouldMangleDeclName(const NamedDecl *D);
+ virtual void mangleName(const NamedDecl *D, llvm::SmallVectorImpl<char> &);
+ virtual void mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleGuardVariable(const VarDecl *D,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleReferenceTemporary(const VarDecl *D,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXVTable(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXVTT(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
+ const CXXRecordDecl *Type,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXRTTI(QualType T, llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXRTTIName(QualType T, llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ llvm::SmallVectorImpl<char> &);
+ void mangleBlock(GlobalDecl GD,
+ const BlockDecl *BD, llvm::SmallVectorImpl<char> &);
+
+ void mangleInitDiscriminator() {
+ Discriminator = 0;
+ }
+
+ bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
+ unsigned &discriminator = Uniquifier[ND];
+ if (!discriminator)
+ discriminator = ++Discriminator;
+ if (discriminator == 1)
+ return false;
+ disc = discriminator-2;
+ return true;
+ }
+ /// @}
+};
+
+/// MiscNameMangler - Mangles Objective-C method names and blocks.
+class MiscNameMangler {
+ MangleContext &Context;
+ llvm::raw_svector_ostream Out;
+
+ ASTContext &getASTContext() const { return Context.getASTContext(); }
+
+public:
+ MiscNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res);
+
+ llvm::raw_svector_ostream &getStream() { return Out; }
+
+ void mangleBlock(GlobalDecl GD, const BlockDecl *BD);
+ void mangleObjCMethodName(const ObjCMethodDecl *MD);
+};
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
new file mode 100644
index 0000000..9407335
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -0,0 +1,1218 @@
+//===--- MicrosoftCXXABI.cpp - Emit LLVM Code from ASTs for a Module ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ code generation targeting the Microsoft Visual C++ ABI.
+// The class in this file generates structures that follow the Microsoft
+// Visual C++ ABI, which is actually not very well documented at all outside
+// of Microsoft.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCXXABI.h"
+#include "CodeGenModule.h"
+#include "Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "CGVTables.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+
+/// MicrosoftCXXNameMangler - Manage the mangling of a single name for the
+/// Microsoft Visual C++ ABI.
+class MicrosoftCXXNameMangler {
+ MangleContext &Context;
+ llvm::raw_svector_ostream Out;
+
+ ASTContext &getASTContext() const { return Context.getASTContext(); }
+
+public:
+ MicrosoftCXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res)
+ : Context(C), Out(Res) { }
+
+ void mangle(const NamedDecl *D, llvm::StringRef Prefix = "?");
+ void mangleName(const NamedDecl *ND);
+ void mangleFunctionEncoding(const FunctionDecl *FD);
+ void mangleVariableEncoding(const VarDecl *VD);
+ void mangleNumber(int64_t Number);
+ void mangleType(QualType T);
+
+private:
+ void mangleUnqualifiedName(const NamedDecl *ND) {
+ mangleUnqualifiedName(ND, ND->getDeclName());
+ }
+ void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name);
+ void mangleSourceName(const IdentifierInfo *II);
+ void manglePostfix(const DeclContext *DC, bool NoFunction=false);
+ void mangleOperatorName(OverloadedOperatorKind OO);
+ void mangleQualifiers(Qualifiers Quals, bool IsMember);
+
+ void mangleObjCMethodName(const ObjCMethodDecl *MD);
+
+ // Declare manglers for every type class.
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T);
+#include "clang/AST/TypeNodes.def"
+
+ void mangleType(const TagType*);
+ void mangleType(const FunctionType *T, const FunctionDecl *D,
+ bool IsStructor, bool IsInstMethod);
+ void mangleType(const ArrayType *T, bool IsGlobal);
+ void mangleExtraDimensions(QualType T);
+ void mangleFunctionClass(const FunctionDecl *FD);
+ void mangleCallingConvention(const FunctionType *T);
+ void mangleThrowSpecification(const FunctionProtoType *T);
+
+};
+
+/// MicrosoftMangleContext - Overrides the default MangleContext for the
+/// Microsoft Visual C++ ABI.
+class MicrosoftMangleContext : public MangleContext {
+public:
+ MicrosoftMangleContext(ASTContext &Context,
+ Diagnostic &Diags) : MangleContext(Context, Diags) { }
+ virtual bool shouldMangleDeclName(const NamedDecl *D);
+ virtual void mangleName(const NamedDecl *D, llvm::SmallVectorImpl<char> &);
+ virtual void mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleGuardVariable(const VarDecl *D,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXVTable(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXVTT(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
+ const CXXRecordDecl *Type,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXRTTI(QualType T, llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXRTTIName(QualType T, llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ llvm::SmallVectorImpl<char> &);
+};
+
+class MicrosoftCXXABI : public CGCXXABI {
+ MicrosoftMangleContext MangleCtx;
+public:
+ MicrosoftCXXABI(CodeGenModule &CGM)
+ : CGCXXABI(CGM), MangleCtx(CGM.getContext(), CGM.getDiags()) {}
+
+ MicrosoftMangleContext &getMangleContext() {
+ return MangleCtx;
+ }
+
+ void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType Type,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys) {
+ // 'this' is already in place
+ // TODO: 'for base' flag
+ }
+
+ void BuildDestructorSignature(const CXXDestructorDecl *Ctor,
+ CXXDtorType Type,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys) {
+ // 'this' is already in place
+ // TODO: 'for base' flag
+ }
+
+ void BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params) {
+ BuildThisParam(CGF, Params);
+ // TODO: 'for base' flag
+ }
+
+ void EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
+ EmitThisParam(CGF);
+ // TODO: 'for base' flag
+ }
+};
+
+}
+
+static bool isInCLinkageSpecification(const Decl *D) {
+ D = D->getCanonicalDecl();
+ for (const DeclContext *DC = D->getDeclContext();
+ !DC->isTranslationUnit(); DC = DC->getParent()) {
+ if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC))
+ return Linkage->getLanguage() == LinkageSpecDecl::lang_c;
+ }
+
+ return false;
+}
+
+bool MicrosoftMangleContext::shouldMangleDeclName(const NamedDecl *D) {
+ // In C, functions with no attributes never need to be mangled. Fastpath them.
+ if (!getASTContext().getLangOptions().CPlusPlus && !D->hasAttrs())
+ return false;
+
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (D->hasAttr<AsmLabelAttr>())
+ return true;
+
+ // Clang's "overloadable" attribute extension to C/C++ always implies name
+ // mangling, as does being a C++ member function or having a name that is
+ // not a simple identifier.
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (FD && (FD->hasAttr<OverloadableAttr>() || isa<CXXMethodDecl>(FD) ||
+ !FD->getDeclName().isIdentifier()))
+ return true;
+
+ // Otherwise, no mangling is done outside C++ mode.
+ if (!getASTContext().getLangOptions().CPlusPlus)
+ return false;
+
+ // Variables at global scope with internal linkage are not mangled.
+ if (!FD) {
+ const DeclContext *DC = D->getDeclContext();
+ if (DC->isTranslationUnit() && D->getLinkage() == InternalLinkage)
+ return false;
+ }
+
+ // C functions and "main" are not mangled.
+ if ((FD && FD->isMain()) || isInCLinkageSpecification(D))
+ return false;
+
+ return true;
+}
+
+void MicrosoftCXXNameMangler::mangle(const NamedDecl *D,
+ llvm::StringRef Prefix) {
+ // MSVC doesn't mangle C++ names the same way it mangles extern "C" names.
+ // Therefore it's really important that we don't decorate the
+ // name with leading underscores or leading/trailing at signs. So, emit an
+ // asm marker at the start so we get the name right.
+ Out << '\01'; // LLVM IR Marker for __asm("foo")
+
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
+ // If we have an asm name, then we use it as the mangling.
+ Out << ALA->getLabel();
+ return;
+ }
+
+ // <mangled-name> ::= ? <name> <type-encoding>
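+ // e.g. a global 'int foo(void)' encodes (after the marker) as ?foo@@YAHXZ
+ // under this scheme.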
+ Out << Prefix;
+ mangleName(D);
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ mangleFunctionEncoding(FD);
+ else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ mangleVariableEncoding(VD);
+ // TODO: Fields? Can MSVC even mangle them?
+}
+
+void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
+ // <type-encoding> ::= <function-class> <function-type>
+
+ // Don't mangle in the type if this isn't a decl we should typically mangle.
+ if (!Context.shouldMangleDeclName(FD))
+ return;
+
+ // We should never ever see a FunctionNoProtoType at this point.
+ // We don't even know how to mangle their types anyway :).
+ const FunctionProtoType *FT = cast<FunctionProtoType>(FD->getType());
+
+ bool InStructor = false, InInstMethod = false;
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
+ if (MD) {
+ if (MD->isInstance())
+ InInstMethod = true;
+ if (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD))
+ InStructor = true;
+ }
+
+ // First, the function class.
+ mangleFunctionClass(FD);
+
+ mangleType(FT, FD, InStructor, InInstMethod);
+}
+
+void MicrosoftCXXNameMangler::mangleVariableEncoding(const VarDecl *VD) {
+ // <type-encoding> ::= <storage-class> <variable-type>
+ // <storage-class> ::= 0 # private static member
+ // ::= 1 # protected static member
+ // ::= 2 # public static member
+ // ::= 3 # global
+ // ::= 4 # static local
+
+ // The first character in the encoding (after the name) is the storage class.
+ if (VD->isStaticDataMember()) {
+ // If it's a static member, it also encodes the access level.
+ switch (VD->getAccess()) {
+ default:
+ case AS_private: Out << '0'; break;
+ case AS_protected: Out << '1'; break;
+ case AS_public: Out << '2'; break;
+ }
+ }
+ else if (!VD->isStaticLocal())
+ Out << '3';
+ else
+ Out << '4';
+ // Now mangle the type.
+ // <variable-type> ::= <type> <cvr-qualifiers>
+ // ::= <type> A # pointers, references, arrays
+ // Pointers and references are odd. The type of 'int * const foo;' gets
+ // mangled as 'QAHA' instead of 'PAHB', for example.
+ QualType Ty = VD->getType();
+ if (Ty->isPointerType() || Ty->isReferenceType()) {
+ mangleType(Ty);
+ Out << 'A';
+ } else if (Ty->isArrayType()) {
+ // Global arrays are funny, too.
+ mangleType(static_cast<ArrayType *>(Ty.getTypePtr()), true);
+ Out << 'A';
+ } else {
+ mangleType(Ty.getLocalUnqualifiedType());
+ mangleQualifiers(Ty.getLocalQualifiers(), false);
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleName(const NamedDecl *ND) {
+ // <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @
+ const DeclContext *DC = ND->getDeclContext();
+
+ // Always start with the unqualified name.
+ mangleUnqualifiedName(ND);
+
+ // If this is an extern variable declared locally, the relevant DeclContext
+ // is that of the containing namespace, or the translation unit.
+ if (isa<FunctionDecl>(DC) && ND->hasLinkage())
+ while (!DC->isNamespace() && !DC->isTranslationUnit())
+ DC = DC->getParent();
+
+ manglePostfix(DC);
+
+ // Terminate the whole name with an '@'.
+ Out << '@';
+}
+
+void MicrosoftCXXNameMangler::mangleNumber(int64_t Number) {
+ // <number> ::= [?] <decimal digit> # <= 9
+ // ::= [?] <hex digit>+ @ # > 9; A = 0, B = 1, etc...
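+ // With this encoding, 3 comes out as '2', 12 as "M@", 32 as "CA@" and
+ // -1 as "?0" (illustrative examples of the code below).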
+ if (Number < 0) {
+ Out << '?';
+ Number = -Number;
+ }
+ if (Number >= 1 && Number <= 10) {
+ Out << Number-1;
+ } else {
+ // We have to build up the encoding in reverse order, so it will come
+ // out right when we write it out.
+ char Encoding[16];
+ char *EndPtr = Encoding+sizeof(Encoding);
+ char *CurPtr = EndPtr;
+ while (Number) {
+ *--CurPtr = 'A' + (Number % 16);
+ Number /= 16;
+ }
+ Out.write(CurPtr, EndPtr-CurPtr);
+ Out << '@';
+ }
+}
+
+void
+MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
+ DeclarationName Name) {
+ // <unqualified-name> ::= <operator-name>
+ // ::= <ctor-dtor-name>
+ // ::= <source-name>
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier: {
+ if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
+ mangleSourceName(II);
+ break;
+ }
+
+ // Otherwise, an anonymous entity. We must have a declaration.
+ assert(ND && "mangling empty name without declaration");
+
+ if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
+ if (NS->isAnonymousNamespace()) {
+ Out << "?A";
+ break;
+ }
+ }
+
+ // We must have an anonymous struct.
+ const TagDecl *TD = cast<TagDecl>(ND);
+ if (const TypedefDecl *D = TD->getTypedefForAnonDecl()) {
+ assert(TD->getDeclContext() == D->getDeclContext() &&
+ "Typedef should not be in another decl context!");
+ assert(D->getDeclName().getAsIdentifierInfo() &&
+ "Typedef was not named!");
+ mangleSourceName(D->getDeclName().getAsIdentifierInfo());
+ break;
+ }
+
+ // When VC encounters an anonymous type with no tag and no typedef,
+ // it literally emits '<unnamed-tag>'.
+ Out << "<unnamed-tag>";
+ break;
+ }
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ assert(false && "Can't mangle Objective-C selector names here!");
+ break;
+
+ case DeclarationName::CXXConstructorName:
+ assert(false && "Can't mangle constructors yet!");
+ break;
+
+ case DeclarationName::CXXDestructorName:
+ assert(false && "Can't mangle destructors yet!");
+ break;
+
+ case DeclarationName::CXXConversionFunctionName:
+ // <operator-name> ::= ?B # (cast)
+ // The target type is encoded as the return type.
+ Out << "?B";
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ mangleOperatorName(Name.getCXXOverloadedOperator());
+ break;
+
+ case DeclarationName::CXXLiteralOperatorName:
+ // FIXME: Was this added in VS2010? Does MS even know how to mangle this?
+ assert(false && "Don't know how to mangle literal operators yet!");
+ break;
+
+ case DeclarationName::CXXUsingDirective:
+ assert(false && "Can't mangle a using directive name!");
+ break;
+ }
+}
+
+void MicrosoftCXXNameMangler::manglePostfix(const DeclContext *DC,
+ bool NoFunction) {
+ // <postfix> ::= <unqualified-name> [<postfix>]
+ // ::= <template-postfix> <template-args> [<postfix>]
+ // ::= <template-param>
+ // ::= <substitution> [<postfix>]
+
+ if (!DC) return;
+
+ while (isa<LinkageSpecDecl>(DC))
+ DC = DC->getParent();
+
+ if (DC->isTranslationUnit())
+ return;
+
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) {
+ llvm::SmallString<64> Name;
+ Context.mangleBlock(GlobalDecl(), BD, Name);
+ Out << Name << '@';
+ return manglePostfix(DC->getParent(), NoFunction);
+ }
+
+ if (NoFunction && (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)))
+ return;
+ else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC))
+ mangleObjCMethodName(Method);
+ else {
+ mangleUnqualifiedName(cast<NamedDecl>(DC));
+ manglePostfix(DC->getParent(), NoFunction);
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO) {
+ switch (OO) {
+ // ?0 # constructor
+ // ?1 # destructor
+ // <operator-name> ::= ?2 # new
+ case OO_New: Out << "?2"; break;
+ // <operator-name> ::= ?3 # delete
+ case OO_Delete: Out << "?3"; break;
+ // <operator-name> ::= ?4 # =
+ case OO_Equal: Out << "?4"; break;
+ // <operator-name> ::= ?5 # >>
+ case OO_GreaterGreater: Out << "?5"; break;
+ // <operator-name> ::= ?6 # <<
+ case OO_LessLess: Out << "?6"; break;
+ // <operator-name> ::= ?7 # !
+ case OO_Exclaim: Out << "?7"; break;
+ // <operator-name> ::= ?8 # ==
+ case OO_EqualEqual: Out << "?8"; break;
+ // <operator-name> ::= ?9 # !=
+ case OO_ExclaimEqual: Out << "?9"; break;
+ // <operator-name> ::= ?A # []
+ case OO_Subscript: Out << "?A"; break;
+ // ?B # conversion
+ // <operator-name> ::= ?C # ->
+ case OO_Arrow: Out << "?C"; break;
+ // <operator-name> ::= ?D # *
+ case OO_Star: Out << "?D"; break;
+ // <operator-name> ::= ?E # ++
+ case OO_PlusPlus: Out << "?E"; break;
+ // <operator-name> ::= ?F # --
+ case OO_MinusMinus: Out << "?F"; break;
+ // <operator-name> ::= ?G # -
+ case OO_Minus: Out << "?G"; break;
+ // <operator-name> ::= ?H # +
+ case OO_Plus: Out << "?H"; break;
+ // <operator-name> ::= ?I # &
+ case OO_Amp: Out << "?I"; break;
+ // <operator-name> ::= ?J # ->*
+ case OO_ArrowStar: Out << "?J"; break;
+ // <operator-name> ::= ?K # /
+ case OO_Slash: Out << "?K"; break;
+ // <operator-name> ::= ?L # %
+ case OO_Percent: Out << "?L"; break;
+ // <operator-name> ::= ?M # <
+ case OO_Less: Out << "?M"; break;
+ // <operator-name> ::= ?N # <=
+ case OO_LessEqual: Out << "?N"; break;
+ // <operator-name> ::= ?O # >
+ case OO_Greater: Out << "?O"; break;
+ // <operator-name> ::= ?P # >=
+ case OO_GreaterEqual: Out << "?P"; break;
+ // <operator-name> ::= ?Q # ,
+ case OO_Comma: Out << "?Q"; break;
+ // <operator-name> ::= ?R # ()
+ case OO_Call: Out << "?R"; break;
+ // <operator-name> ::= ?S # ~
+ case OO_Tilde: Out << "?S"; break;
+ // <operator-name> ::= ?T # ^
+ case OO_Caret: Out << "?T"; break;
+ // <operator-name> ::= ?U # |
+ case OO_Pipe: Out << "?U"; break;
+ // <operator-name> ::= ?V # &&
+ case OO_AmpAmp: Out << "?V"; break;
+ // <operator-name> ::= ?W # ||
+ case OO_PipePipe: Out << "?W"; break;
+ // <operator-name> ::= ?X # *=
+ case OO_StarEqual: Out << "?X"; break;
+ // <operator-name> ::= ?Y # +=
+ case OO_PlusEqual: Out << "?Y"; break;
+ // <operator-name> ::= ?Z # -=
+ case OO_MinusEqual: Out << "?Z"; break;
+ // <operator-name> ::= ?_0 # /=
+ case OO_SlashEqual: Out << "?_0"; break;
+ // <operator-name> ::= ?_1 # %=
+ case OO_PercentEqual: Out << "?_1"; break;
+ // <operator-name> ::= ?_2 # >>=
+ case OO_GreaterGreaterEqual: Out << "?_2"; break;
+ // <operator-name> ::= ?_3 # <<=
+ case OO_LessLessEqual: Out << "?_3"; break;
+ // <operator-name> ::= ?_4 # &=
+ case OO_AmpEqual: Out << "?_4"; break;
+ // <operator-name> ::= ?_5 # |=
+ case OO_PipeEqual: Out << "?_5"; break;
+ // <operator-name> ::= ?_6 # ^=
+ case OO_CaretEqual: Out << "?_6"; break;
+ // ?_7 # vftable
+ // ?_8 # vbtable
+ // ?_9 # vcall
+ // ?_A # typeof
+ // ?_B # local static guard
+ // ?_C # string
+ // ?_D # vbase destructor
+ // ?_E # vector deleting destructor
+ // ?_F # default constructor closure
+ // ?_G # scalar deleting destructor
+ // ?_H # vector constructor iterator
+ // ?_I # vector destructor iterator
+ // ?_J # vector vbase constructor iterator
+ // ?_K # virtual displacement map
+ // ?_L # eh vector constructor iterator
+ // ?_M # eh vector destructor iterator
+ // ?_N # eh vector vbase constructor iterator
+ // ?_O # copy constructor closure
+ // ?_P<name> # udt returning <name>
+ // ?_Q # <unknown>
+ // ?_R0 # RTTI Type Descriptor
+ // ?_R1 # RTTI Base Class Descriptor at (a,b,c,d)
+ // ?_R2 # RTTI Base Class Array
+ // ?_R3 # RTTI Class Hierarchy Descriptor
+ // ?_R4 # RTTI Complete Object Locator
+ // ?_S # local vftable
+ // ?_T # local vftable constructor closure
+ // <operator-name> ::= ?_U # new[]
+ case OO_Array_New: Out << "?_U"; break;
+ // <operator-name> ::= ?_V # delete[]
+ case OO_Array_Delete: Out << "?_V"; break;
+
+ case OO_Conditional:
+ assert(false && "Don't know how to mangle ?:");
+ break;
+
+ case OO_None:
+ case NUM_OVERLOADED_OPERATORS:
+ assert(false && "Not an overloaded operator");
+ break;
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
+ // <source name> ::= <identifier> @
+ Out << II->getName() << '@';
+}
+
+void MicrosoftCXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
+ llvm::SmallString<64> Buffer;
+ MiscNameMangler(Context, Buffer).mangleObjCMethodName(MD);
+ Out << Buffer;
+}
+
+void MicrosoftCXXNameMangler::mangleQualifiers(Qualifiers Quals,
+ bool IsMember) {
+ // <cvr-qualifiers> ::= [E] [F] [I] <base-cvr-qualifiers>
+ // 'E' means __ptr64 (32-bit only); 'F' means __unaligned (32/64-bit only);
+ // 'I' means __restrict (32/64-bit).
+ // Note that the MSVC __restrict keyword isn't the same as the C99 restrict
+ // keyword!
+ // <base-cvr-qualifiers> ::= A # near
+ // ::= B # near const
+ // ::= C # near volatile
+ // ::= D # near const volatile
+ // ::= E # far (16-bit)
+ // ::= F # far const (16-bit)
+ // ::= G # far volatile (16-bit)
+ // ::= H # far const volatile (16-bit)
+ // ::= I # huge (16-bit)
+ // ::= J # huge const (16-bit)
+ // ::= K # huge volatile (16-bit)
+ // ::= L # huge const volatile (16-bit)
+ // ::= M <basis> # based
+ // ::= N <basis> # based const
+ // ::= O <basis> # based volatile
+ // ::= P <basis> # based const volatile
+ // ::= Q # near member
+ // ::= R # near const member
+ // ::= S # near volatile member
+ // ::= T # near const volatile member
+ // ::= U # far member (16-bit)
+ // ::= V # far const member (16-bit)
+ // ::= W # far volatile member (16-bit)
+ // ::= X # far const volatile member (16-bit)
+ // ::= Y # huge member (16-bit)
+ // ::= Z # huge const member (16-bit)
+ // ::= 0 # huge volatile member (16-bit)
+ // ::= 1 # huge const volatile member (16-bit)
+ // ::= 2 <basis> # based member
+ // ::= 3 <basis> # based const member
+ // ::= 4 <basis> # based volatile member
+ // ::= 5 <basis> # based const volatile member
+ // ::= 6 # near function (pointers only)
+ // ::= 7 # far function (pointers only)
+ // ::= 8 # near method (pointers only)
+ // ::= 9 # far method (pointers only)
+ // ::= _A <basis> # based function (pointers only)
+ // ::= _B <basis> # based function (far?) (pointers only)
+ // ::= _C <basis> # based method (pointers only)
+ // ::= _D <basis> # based method (far?) (pointers only)
+ // ::= _E # block (Clang)
+ // <basis> ::= 0 # __based(void)
+ // ::= 1 # __based(segment)?
+ // ::= 2 <name> # __based(name)
+ // ::= 3 # ?
+ // ::= 4 # ?
+ // ::= 5 # not really based
+ if (!IsMember) {
+ if (!Quals.hasVolatile()) {
+ if (!Quals.hasConst())
+ Out << 'A';
+ else
+ Out << 'B';
+ } else {
+ if (!Quals.hasConst())
+ Out << 'C';
+ else
+ Out << 'D';
+ }
+ } else {
+ if (!Quals.hasVolatile()) {
+ if (!Quals.hasConst())
+ Out << 'Q';
+ else
+ Out << 'R';
+ } else {
+ if (!Quals.hasConst())
+ Out << 'S';
+ else
+ Out << 'T';
+ }
+ }
+
+ // FIXME: For now, just drop all extension qualifiers on the floor.
+}
+
+void MicrosoftCXXNameMangler::mangleType(QualType T) {
+ // Only operate on the canonical type!
+ T = getASTContext().getCanonicalType(T);
+
+ Qualifiers Quals = T.getLocalQualifiers();
+ if (Quals) {
+ // We have to mangle these now, while we still have enough information.
+ // <pointer-cvr-qualifiers> ::= P # pointer
+ // ::= Q # const pointer
+ // ::= R # volatile pointer
+ // ::= S # const volatile pointer
+ if (T->isAnyPointerType() || T->isMemberPointerType() ||
+ T->isBlockPointerType()) {
+ if (!Quals.hasVolatile())
+ Out << 'Q';
+ else {
+ if (!Quals.hasConst())
+ Out << 'R';
+ else
+ Out << 'S';
+ }
+ } else
+ // Just emit qualifiers like normal.
+ // NB: When we mangle a pointer/reference type, and the pointee
+ // type has no qualifiers, the lack of qualifier gets mangled
+ // in there.
+ mangleQualifiers(Quals, false);
+ } else if (T->isAnyPointerType() || T->isMemberPointerType() ||
+ T->isBlockPointerType()) {
+ Out << 'P';
+ }
+ switch (T->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT) \
+case Type::CLASS: \
+llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \
+return;
+#define TYPE(CLASS, PARENT) \
+case Type::CLASS: \
+mangleType(static_cast<const CLASS##Type*>(T.getTypePtr())); \
+break;
+#include "clang/AST/TypeNodes.def"
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T) {
+ // <type> ::= <builtin-type>
+ // <builtin-type> ::= X # void
+ // ::= C # signed char
+ // ::= D # char
+ // ::= E # unsigned char
+ // ::= F # short
+ // ::= G # unsigned short (or wchar_t if it's not a builtin)
+ // ::= H # int
+ // ::= I # unsigned int
+ // ::= J # long
+ // ::= K # unsigned long
+ // L # <none>
+ // ::= M # float
+ // ::= N # double
+ // ::= O # long double (__float80 is mangled differently)
+ // ::= _D # __int8 (yup, it's a distinct type in MSVC)
+ // ::= _E # unsigned __int8
+ // ::= _F # __int16
+ // ::= _G # unsigned __int16
+ // ::= _H # __int32
+ // ::= _I # unsigned __int32
+ // ::= _J # long long, __int64
+ // ::= _K # unsigned long long, __int64
+ // ::= _L # __int128
+ // ::= _M # unsigned __int128
+ // ::= _N # bool
+ // _O # <array in parameter>
+ // ::= _T # __float80 (Intel)
+ // ::= _W # wchar_t
+ // ::= _Z # __float80 (Digital Mars)
+ switch (T->getKind()) {
+ case BuiltinType::Void: Out << 'X'; break;
+ case BuiltinType::SChar: Out << 'C'; break;
+ case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'D'; break;
+ case BuiltinType::UChar: Out << 'E'; break;
+ case BuiltinType::Short: Out << 'F'; break;
+ case BuiltinType::UShort: Out << 'G'; break;
+ case BuiltinType::Int: Out << 'H'; break;
+ case BuiltinType::UInt: Out << 'I'; break;
+ case BuiltinType::Long: Out << 'J'; break;
+ case BuiltinType::ULong: Out << 'K'; break;
+ case BuiltinType::Float: Out << 'M'; break;
+ case BuiltinType::Double: Out << 'N'; break;
+ // TODO: Determine size and mangle accordingly
+ case BuiltinType::LongDouble: Out << 'O'; break;
+ // TODO: __int8 and friends
+ case BuiltinType::LongLong: Out << "_J"; break;
+ case BuiltinType::ULongLong: Out << "_K"; break;
+ case BuiltinType::Int128: Out << "_L"; break;
+ case BuiltinType::UInt128: Out << "_M"; break;
+ case BuiltinType::Bool: Out << "_N"; break;
+ case BuiltinType::WChar: Out << "_W"; break;
+
+ case BuiltinType::Overload:
+ case BuiltinType::Dependent:
+ assert(false &&
+ "Overloaded and dependent types shouldn't get to name mangling");
+ break;
+ case BuiltinType::UndeducedAuto:
+ assert(0 && "Should not see undeduced auto here");
+ break;
+ case BuiltinType::ObjCId: Out << "PAUobjc_object@@"; break;
+ case BuiltinType::ObjCClass: Out << "PAUobjc_class@@"; break;
+ case BuiltinType::ObjCSel: Out << "PAUobjc_selector@@"; break;
+
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::NullPtr:
+ assert(false && "Don't know how to mangle this type");
+ break;
+ }
+}
+
+// <type> ::= <function-type>
+void MicrosoftCXXNameMangler::mangleType(const FunctionProtoType *T) {
+ // Structors only appear in decls, so at this point we know it's not a
+ // structor type.
+ // I'll probably have mangleType(MemberPointerType) call the mangleType()
+ // method directly.
+ mangleType(T, NULL, false, false);
+}
+void MicrosoftCXXNameMangler::mangleType(const FunctionNoProtoType *T) {
+ llvm_unreachable("Can't mangle K&R function prototypes");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const FunctionType *T,
+ const FunctionDecl *D,
+ bool IsStructor,
+ bool IsInstMethod) {
+ // <function-type> ::= <this-cvr-qualifiers> <calling-convention>
+ // <return-type> <argument-list> <throw-spec>
+ const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
+
+ // If this is a C++ instance method, mangle the CVR qualifiers for the
+ // this pointer.
+ if (IsInstMethod)
+ mangleQualifiers(Qualifiers::fromCVRMask(Proto->getTypeQuals()), false);
+
+ mangleCallingConvention(T);
+
+ // <return-type> ::= <type>
+ // ::= @ # structors (they have no declared return type)
+ if (IsStructor)
+ Out << '@';
+ else
+ mangleType(Proto->getResultType());
+
+ // <argument-list> ::= X # void
+ // ::= <type>+ @
+ // ::= <type>* Z # varargs
+ if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) {
+ Out << 'X';
+ } else {
+ if (D) {
+ // If we got a decl, use the "types-as-written" to make sure arrays
+ // get mangled right.
+ for (FunctionDecl::param_const_iterator Parm = D->param_begin(),
+ ParmEnd = D->param_end();
+ Parm != ParmEnd; ++Parm)
+ mangleType((*Parm)->getTypeSourceInfo()->getType());
+ } else {
+ for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
+ ArgEnd = Proto->arg_type_end();
+ Arg != ArgEnd; ++Arg)
+ mangleType(*Arg);
+ }
+ // <builtin-type> ::= Z # ellipsis
+ if (Proto->isVariadic())
+ Out << 'Z';
+ else
+ Out << '@';
+ }
+
+ mangleThrowSpecification(Proto);
+}
+
+void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) {
+ // <function-class> ::= A # private: near
+ // ::= B # private: far
+ // ::= C # private: static near
+ // ::= D # private: static far
+ // ::= E # private: virtual near
+ // ::= F # private: virtual far
+ // ::= G # private: thunk near
+ // ::= H # private: thunk far
+ // ::= I # protected: near
+ // ::= J # protected: far
+ // ::= K # protected: static near
+ // ::= L # protected: static far
+ // ::= M # protected: virtual near
+ // ::= N # protected: virtual far
+ // ::= O # protected: thunk near
+ // ::= P # protected: thunk far
+ // ::= Q # public: near
+ // ::= R # public: far
+ // ::= S # public: static near
+ // ::= T # public: static far
+ // ::= U # public: virtual near
+ // ::= V # public: virtual far
+ // ::= W # public: thunk near
+ // ::= X # public: thunk far
+ // ::= Y # global near
+ // ::= Z # global far
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ switch (MD->getAccess()) {
+ default:
+ case AS_private:
+ if (MD->isStatic())
+ Out << 'C';
+ else if (MD->isVirtual())
+ Out << 'E';
+ else
+ Out << 'A';
+ break;
+ case AS_protected:
+ if (MD->isStatic())
+ Out << 'K';
+ else if (MD->isVirtual())
+ Out << 'M';
+ else
+ Out << 'I';
+ break;
+ case AS_public:
+ if (MD->isStatic())
+ Out << 'S';
+ else if (MD->isVirtual())
+ Out << 'U';
+ else
+ Out << 'Q';
+ }
+ } else
+ Out << 'Y';
+}
+void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T) {
+ // <calling-convention> ::= A # __cdecl
+ // ::= B # __export __cdecl
+ // ::= C # __pascal
+ // ::= D # __export __pascal
+ // ::= E # __thiscall
+ // ::= F # __export __thiscall
+ // ::= G # __stdcall
+ // ::= H # __export __stdcall
+ // ::= I # __fastcall
+ // ::= J # __export __fastcall
+ // The 'export' calling conventions are from a bygone era
+ // (*cough*Win16*cough*) when functions were declared for export with
+ // that keyword. (It didn't actually export them, it just made them so
+ // that they could be in a DLL and somebody from another module could call
+ // them.)
+ switch (T->getCallConv()) {
+ case CC_Default:
+ case CC_C: Out << 'A'; break;
+ case CC_X86Pascal: Out << 'C'; break;
+ case CC_X86ThisCall: Out << 'E'; break;
+ case CC_X86StdCall: Out << 'G'; break;
+ case CC_X86FastCall: Out << 'I'; break;
+ }
+}
+void MicrosoftCXXNameMangler::mangleThrowSpecification(
+ const FunctionProtoType *FT) {
+ // <throw-spec> ::= Z # throw(...) (default)
+ // ::= @ # throw() or __declspec/__attribute__((nothrow))
+ // ::= <type>+
+ // NOTE: Since the Microsoft compiler ignores throw specifications, they are
+ // all actually mangled as 'Z'. (They're ignored because their associated
+ // functionality isn't implemented, and probably never will be.)
+ Out << 'Z';
+}
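+
+// As a hand-worked sketch of how the pieces above compose (checked against
+// MSVC's documented scheme rather than produced by this in-progress mangler):
+//
+//   int __cdecl func(double, bool);   // mangles to ?func@@YAHN_N@Z
+//
+//   ?  func@@   name
+//   Y           <function-class>: global near
+//   A           <calling-convention>: __cdecl
+//   H           <return-type>: int
+//   N  _N       <argument-list>: double, bool
+//   @           end of argument list
+//   Z           <throw-spec>: throw(...)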
+
+void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T) {
+ assert(false && "Don't know how to mangle UnresolvedUsingTypes yet!");
+}
+
+// <type> ::= <union-type> | <struct-type> | <class-type> | <enum-type>
+// <union-type> ::= T <name>
+// <struct-type> ::= U <name>
+// <class-type> ::= V <name>
+// <enum-type> ::= W <size> <name>
+void MicrosoftCXXNameMangler::mangleType(const EnumType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void MicrosoftCXXNameMangler::mangleType(const RecordType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void MicrosoftCXXNameMangler::mangleType(const TagType *T) {
+ switch (T->getDecl()->getTagKind()) {
+ case TTK_Union:
+ Out << 'T';
+ break;
+ case TTK_Struct:
+ Out << 'U';
+ break;
+ case TTK_Class:
+ Out << 'V';
+ break;
+ case TTK_Enum:
+ Out << 'W';
+ Out << getASTContext().getTypeSizeInChars(
+ cast<EnumDecl>(T->getDecl())->getIntegerType()).getQuantity();
+ break;
+ }
+ mangleName(T->getDecl());
+}
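+
+// A minimal sketch of the tag-type codes in context (what MSVC itself emits
+// for these signatures; illustrative only):
+//
+//   struct S {};  enum E { e };
+//   void __cdecl f(S);   // ?f@@YAXUS@@@Z     U <name> for the struct
+//   void __cdecl g(E);   // ?g@@YAXW4E@@@Z    W <size> <name> for the enum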
+
+// <type> ::= <array-type>
+// <array-type> ::= P <cvr-qualifiers> [Y <dimension-count> <dimension>+]
+// <element-type> # as global
+// ::= Q <cvr-qualifiers> [Y <dimension-count> <dimension>+]
+// <element-type> # as param
+// It's supposed to be the other way around, but for some strange reason, it
+// isn't. Today this behavior is retained for the sole purpose of backwards
+// compatibility.
+void MicrosoftCXXNameMangler::mangleType(const ArrayType *T, bool IsGlobal) {
+ // This isn't a recursive mangling, so now we have to do it all in this
+ // one call.
+ if (IsGlobal)
+ Out << 'P';
+ else
+ Out << 'Q';
+ mangleExtraDimensions(T->getElementType());
+}
+void MicrosoftCXXNameMangler::mangleType(const ConstantArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleType(const VariableArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleType(const DependentSizedArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleType(const IncompleteArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleExtraDimensions(QualType ElementTy) {
+ llvm::SmallVector<llvm::APInt, 3> Dimensions;
+ for (;;) {
+ if (ElementTy->isConstantArrayType()) {
+ const ConstantArrayType *CAT =
+ static_cast<const ConstantArrayType *>(ElementTy.getTypePtr());
+ Dimensions.push_back(CAT->getSize());
+ ElementTy = CAT->getElementType();
+ } else if (ElementTy->isVariableArrayType()) {
+ assert(false && "Don't know how to mangle VLAs!");
+ } else if (ElementTy->isDependentSizedArrayType()) {
+ // The dependent expression has to be folded into a constant (TODO).
+ assert(false && "Don't know how to mangle dependent-sized arrays!");
+ } else if (ElementTy->isIncompleteArrayType()) continue;
+ else break;
+ }
+ mangleQualifiers(ElementTy.getQualifiers(), false);
+ // If there are any additional dimensions, mangle them now.
+ if (Dimensions.size() > 0) {
+ Out << 'Y';
+ // <dimension-count> ::= <number> # number of extra dimensions
+ mangleNumber(Dimensions.size());
+ for (unsigned Dim = 0; Dim < Dimensions.size(); ++Dim) {
+ mangleNumber(Dimensions[Dim].getLimitedValue());
+ }
+ }
+ mangleType(ElementTy.getLocalUnqualifiedType());
+}
+
+// <type> ::= <pointer-to-member-type>
+// <pointer-to-member-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers>
+// <class name> <type>
+void MicrosoftCXXNameMangler::mangleType(const MemberPointerType *T) {
+ QualType PointeeType = T->getPointeeType();
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
+ Out << '8';
+ mangleName(cast<RecordType>(T->getClass())->getDecl());
+ mangleType(FPT, NULL, false, true);
+ } else {
+ mangleQualifiers(PointeeType.getQualifiers(), true);
+ mangleName(cast<RecordType>(T->getClass())->getDecl());
+ mangleType(PointeeType.getLocalUnqualifiedType());
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TemplateTypeParmType *T) {
+ assert(false && "Don't know how to mangle TemplateTypeParmTypes yet!");
+}
+
+// <type> ::= <pointer-type>
+// <pointer-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers> <type>
+void MicrosoftCXXNameMangler::mangleType(const PointerType *T) {
+ QualType PointeeTy = T->getPointeeType();
+ if (PointeeTy->isArrayType()) {
+ // Pointers to arrays are mangled like arrays.
+ mangleExtraDimensions(T->getPointeeType());
+ } else if (PointeeTy->isFunctionType()) {
+ // Function pointers are special.
+ Out << '6';
+ mangleType(static_cast<const FunctionType *>(PointeeTy.getTypePtr()),
+ NULL, false, false);
+ } else {
+ if (!PointeeTy.hasQualifiers())
+ // Lack of qualifiers is mangled as 'A'.
+ Out << 'A';
+ mangleType(PointeeTy);
+ }
+}
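+
+// For orientation, the encoding 32-bit MSVC itself produces for pointer
+// parameters (this mangler is still incomplete here, so this is only a
+// reference sketch):
+//
+//   void __cdecl f(char *);         // ?f@@YAXPAD@Z   A = unqualified pointee
+//   void __cdecl g(const char *);   // ?g@@YAXPBD@Z   B = const pointee
+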
+void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T) {
+ // Object pointers never have qualifiers.
+ Out << 'A';
+ mangleType(T->getPointeeType());
+}
+
+// <type> ::= <reference-type>
+// <reference-type> ::= A <cvr-qualifiers> <type>
+void MicrosoftCXXNameMangler::mangleType(const LValueReferenceType *T) {
+ Out << 'A';
+ QualType PointeeTy = T->getPointeeType();
+ if (!PointeeTy.hasQualifiers())
+ // Lack of qualifiers is mangled as 'A'.
+ Out << 'A';
+ mangleType(PointeeTy);
+}
+
+void MicrosoftCXXNameMangler::mangleType(const RValueReferenceType *T) {
+ assert(false && "Don't know how to mangle RValueReferenceTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ComplexType *T) {
+ assert(false && "Don't know how to mangle ComplexTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const VectorType *T) {
+ assert(false && "Don't know how to mangle VectorTypes yet!");
+}
+void MicrosoftCXXNameMangler::mangleType(const ExtVectorType *T) {
+ assert(false && "Don't know how to mangle ExtVectorTypes yet!");
+}
+void MicrosoftCXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
+ assert(false && "Don't know how to mangle DependentSizedExtVectorTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T) {
+ // ObjC interfaces have structs underlying them.
+ Out << 'U';
+ mangleName(T->getDecl());
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T) {
+ // We don't allow overloading by different protocol qualification,
+ // so mangling them isn't necessary.
+ mangleType(T->getBaseType());
+}
+
+void MicrosoftCXXNameMangler::mangleType(const BlockPointerType *T) {
+ Out << "_E";
+ mangleType(T->getPointeeType());
+}
+
+void MicrosoftCXXNameMangler::mangleType(const InjectedClassNameType *T) {
+ assert(false && "Don't know how to mangle InjectedClassNameTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TemplateSpecializationType *T) {
+ assert(false && "Don't know how to mangle TemplateSpecializationTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const DependentNameType *T) {
+ assert(false && "Don't know how to mangle DependentNameTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(
+ const DependentTemplateSpecializationType *T) {
+ assert(false &&
+ "Don't know how to mangle DependentTemplateSpecializationTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TypeOfType *T) {
+ assert(false && "Don't know how to mangle TypeOfTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TypeOfExprType *T) {
+ assert(false && "Don't know how to mangle TypeOfExprTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const DecltypeType *T) {
+ assert(false && "Don't know how to mangle DecltypeTypes yet!");
+}
+
+void MicrosoftMangleContext::mangleName(const NamedDecl *D,
+ llvm::SmallVectorImpl<char> &Name) {
+ assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
+ "Invalid mangleName() call, argument is not a variable or function!");
+ assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
+ "Invalid mangleName() call on 'structor decl!");
+
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ getASTContext().getSourceManager(),
+ "Mangling declaration");
+
+ MicrosoftCXXNameMangler Mangler(*this, Name);
+ return Mangler.mangle(D);
+}
+void MicrosoftMangleContext::mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle thunks!");
+}
+void MicrosoftMangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const ThisAdjustment &,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle destructor thunks!");
+}
+void MicrosoftMangleContext::mangleGuardVariable(const VarDecl *D,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle guard variables!");
+}
+void MicrosoftMangleContext::mangleCXXVTable(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle virtual tables!");
+}
+void MicrosoftMangleContext::mangleCXXVTT(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &) {
+ llvm_unreachable("The MS C++ ABI does not have virtual table tables!");
+}
+void MicrosoftMangleContext::mangleCXXCtorVTable(const CXXRecordDecl *RD,
+ int64_t Offset,
+ const CXXRecordDecl *Type,
+ llvm::SmallVectorImpl<char> &) {
+ llvm_unreachable("The MS C++ ABI does not have constructor vtables!");
+}
+void MicrosoftMangleContext::mangleCXXRTTI(QualType T,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle RTTI!");
+}
+void MicrosoftMangleContext::mangleCXXRTTIName(QualType T,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle RTTI names!");
+}
+void MicrosoftMangleContext::mangleCXXCtor(const CXXConstructorDecl *D,
+ CXXCtorType Type,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle constructors!");
+}
+void MicrosoftMangleContext::mangleCXXDtor(const CXXDestructorDecl *D,
+ CXXDtorType Type,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle destructors!");
+}
+
+CGCXXABI *clang::CodeGen::CreateMicrosoftCXXABI(CodeGenModule &CGM) {
+ return new MicrosoftCXXABI(CGM);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
new file mode 100644
index 0000000..6d9d277
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
@@ -0,0 +1,107 @@
+//===--- ModuleBuilder.cpp - Emit LLVM Code from ASTs ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This builds an AST and converts it to LLVM Code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "CodeGenModule.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/ADT/OwningPtr.h"
+using namespace clang;
+
+namespace {
+ class CodeGeneratorImpl : public CodeGenerator {
+ Diagnostic &Diags;
+ llvm::OwningPtr<const llvm::TargetData> TD;
+ ASTContext *Ctx;
+ const CodeGenOptions CodeGenOpts; // Intentionally copied in.
+ protected:
+ llvm::OwningPtr<llvm::Module> M;
+ llvm::OwningPtr<CodeGen::CodeGenModule> Builder;
+ public:
+ CodeGeneratorImpl(Diagnostic &diags, const std::string& ModuleName,
+ const CodeGenOptions &CGO, llvm::LLVMContext& C)
+ : Diags(diags), CodeGenOpts(CGO), M(new llvm::Module(ModuleName, C)) {}
+
+ virtual ~CodeGeneratorImpl() {}
+
+ virtual llvm::Module* GetModule() {
+ return M.get();
+ }
+
+ virtual llvm::Module* ReleaseModule() {
+ return M.take();
+ }
+
+ virtual void Initialize(ASTContext &Context) {
+ Ctx = &Context;
+
+ M->setTargetTriple(Ctx->Target.getTriple().getTriple());
+ M->setDataLayout(Ctx->Target.getTargetDescription());
+ TD.reset(new llvm::TargetData(Ctx->Target.getTargetDescription()));
+ Builder.reset(new CodeGen::CodeGenModule(Context, CodeGenOpts,
+ *M, *TD, Diags));
+ }
+
+ virtual void HandleTopLevelDecl(DeclGroupRef DG) {
+ // Make sure to emit all elements of a Decl.
+ for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
+ Builder->EmitTopLevelDecl(*I);
+ }
+
+ /// HandleTagDeclDefinition - This callback is invoked each time a TagDecl
+ /// (e.g. struct, union, enum, class) is completed. This allows the client
+ /// to hack on the type, which can occur at any point in the file
+ /// (because these can be defined in declspecs).
+ virtual void HandleTagDeclDefinition(TagDecl *D) {
+ Builder->UpdateCompletedType(D);
+ }
+
+ virtual void HandleTranslationUnit(ASTContext &Ctx) {
+ if (Diags.hasErrorOccurred()) {
+ M.reset();
+ return;
+ }
+
+ if (Builder)
+ Builder->Release();
+ }
+
+ virtual void CompleteTentativeDefinition(VarDecl *D) {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ Builder->EmitTentativeDefinition(D);
+ }
+
+ virtual void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ Builder->EmitVTable(RD, DefinitionRequired);
+ }
+ };
+}
+
+CodeGenerator *clang::CreateLLVMCodeGen(Diagnostic &Diags,
+ const std::string& ModuleName,
+ const CodeGenOptions &CGO,
+ llvm::LLVMContext& C) {
+ return new CodeGeneratorImpl(Diags, ModuleName, CGO, C);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/README.txt b/contrib/llvm/tools/clang/lib/CodeGen/README.txt
new file mode 100644
index 0000000..e6d6109
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/README.txt
@@ -0,0 +1,47 @@
+IRgen optimization opportunities.
+
+//===---------------------------------------------------------------------===//
+
+The common pattern of
+--
+short x; // or char, etc
+(x == 10)
+--
+generates a zext/sext of x which can easily be avoided.
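+
+Roughly, the IR for the promoted comparison looks like (a sketch, assuming x
+lives in a stack slot %x.addr):
+--
+%x    = load i16* %x.addr
+%conv = sext i16 %x to i32
+%cmp  = icmp eq i32 %conv, 10
+--
+when a direct i16 compare (icmp eq i16 %x, 10) would do.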
+
+//===---------------------------------------------------------------------===//
+
+Bitfield accesses can be shifted to simplify masking and sign
+extension. For example, if the bitfield width is 8 and it is
+appropriately aligned then it is a lot shorter to just load the char
+directly.
+
+//===---------------------------------------------------------------------===//
+
+It may be worth avoiding creation of alloca's for formal arguments
+for the common situation where the argument is never written to or has
+its address taken. The idea would be to begin generating code by using
+the argument directly and if its address is taken or it is stored to
+then generate the alloca and patch up the existing code.
+
+In theory, the same optimization could be a win for block local
+variables as long as the declaration dominates all statements in the
+block.
+
+NOTE: The main case we care about this for is for -O0 -g compile time
+performance, and in that scenario we will need to emit the alloca
+anyway currently to emit proper debug info. So this is blocked by
+being able to emit debug information which refers to an LLVM
+temporary, not an alloca.
+
+//===---------------------------------------------------------------------===//
+
+We should try to avoid generating basic blocks which only contain
+jumps. At -O0, this penalizes us all the way from IRgen (malloc &
+instruction overhead) down through code generation and assembly time.
+
+On 176.gcc:expr.ll, it looks like over 12% of basic blocks are just
+direct branches!
+
+//===---------------------------------------------------------------------===//
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
new file mode 100644
index 0000000..4d221d4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
@@ -0,0 +1,2665 @@
+//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliancy.
+//
+//===----------------------------------------------------------------------===//
+
+#include "TargetInfo.h"
+#include "ABIInfo.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/RecordLayout.h"
+#include "llvm/Type.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace CodeGen;
+
+static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
+ llvm::Value *Array,
+ llvm::Value *Value,
+ unsigned FirstIndex,
+ unsigned LastIndex) {
+ // Alternatively, we could emit this as a loop in the source.
+ for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
+ llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
+ Builder.CreateStore(Value, Cell);
+ }
+}
+
+static bool isAggregateTypeForABI(QualType T) {
+ return CodeGenFunction::hasAggregateLLVMType(T) ||
+ T->isMemberFunctionPointerType();
+}
+
+ABIInfo::~ABIInfo() {}
+
+ASTContext &ABIInfo::getContext() const {
+ return CGT.getContext();
+}
+
+llvm::LLVMContext &ABIInfo::getVMContext() const {
+ return CGT.getLLVMContext();
+}
+
+const llvm::TargetData &ABIInfo::getTargetData() const {
+ return CGT.getTargetData();
+}
+
+
+void ABIArgInfo::dump() const {
+ llvm::raw_ostream &OS = llvm::errs();
+ OS << "(ABIArgInfo Kind=";
+ switch (TheKind) {
+ case Direct:
+ OS << "Direct Type=";
+ if (const llvm::Type *Ty = getCoerceToType())
+ Ty->print(OS);
+ else
+ OS << "null";
+ break;
+ case Extend:
+ OS << "Extend";
+ break;
+ case Ignore:
+ OS << "Ignore";
+ break;
+ case Indirect:
+ OS << "Indirect Align=" << getIndirectAlign()
+ << " ByVal=" << getIndirectByVal();
+ break;
+ case Expand:
+ OS << "Expand";
+ break;
+ }
+ OS << ")\n";
+}
+
+TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
+
+static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
+
+/// isEmptyField - Return true iff the field is "empty", that is, it
+/// is an unnamed bit-field or an (array of) empty record(s).
+static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
+ bool AllowArrays) {
+ if (FD->isUnnamedBitfield())
+ return true;
+
+ QualType FT = FD->getType();
+
+ // Constant arrays of empty records count as empty, strip them off.
+ if (AllowArrays)
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
+ FT = AT->getElementType();
+
+ const RecordType *RT = FT->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ // C++ record fields are never empty, at least in the Itanium ABI.
+ //
+ // FIXME: We should use a predicate for whether this behavior is true in the
+ // current ABI.
+ if (isa<CXXRecordDecl>(RT->getDecl()))
+ return false;
+
+ return isEmptyRecord(Context, FT, AllowArrays);
+}
+
+/// isEmptyRecord - Return true iff a structure contains only empty
+/// fields. Note that a structure with a flexible array member is not
+/// considered empty.
+static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i)
+ if (!isEmptyRecord(Context, i->getType(), true))
+ return false;
+
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i)
+ if (!isEmptyField(Context, *i, AllowArrays))
+ return false;
+ return true;
+}
+
+/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
+/// a non-trivial destructor or a non-trivial copy constructor.
+static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD)
+ return false;
+
+ return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
+}
+
+/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
+/// a record type with either a non-trivial destructor or a non-trivial copy
+/// constructor.
+static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ return hasNonTrivialDestructorOrCopyConstructor(RT);
+}
+
+/// isSingleElementStruct - Determine if a structure is a "single
+/// element struct", i.e. it has exactly one non-empty field or
+/// exactly one field which is itself a single element
+/// struct. Structures with flexible array members are never
+/// considered single element structs.
+///
+/// \return The field declaration for the single non-empty field, if
+/// it exists.
+static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
+ const RecordType *RT = T->getAsStructureType();
+ if (!RT)
+ return 0;
+
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return 0;
+
+ const Type *Found = 0;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i) {
+ // Ignore empty records.
+ if (isEmptyRecord(Context, i->getType(), true))
+ continue;
+
+ // If we already found an element then this isn't a single-element struct.
+ if (Found)
+ return 0;
+
+ // If this is non-empty and not a single element struct, the composite
+ // cannot be a single element struct.
+ Found = isSingleElementStruct(i->getType(), Context);
+ if (!Found)
+ return 0;
+ }
+ }
+
+ // Check for single element.
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+ QualType FT = FD->getType();
+
+ // Ignore empty fields.
+ if (isEmptyField(Context, FD, true))
+ continue;
+
+ // If we already found an element then this isn't a single-element
+ // struct.
+ if (Found)
+ return 0;
+
+ // Treat single element arrays as the element.
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
+ if (AT->getSize().getZExtValue() != 1)
+ break;
+ FT = AT->getElementType();
+ }
+
+ if (!isAggregateTypeForABI(FT)) {
+ Found = FT.getTypePtr();
+ } else {
+ Found = isSingleElementStruct(FT, Context);
+ if (!Found)
+ return 0;
+ }
+ }
+
+ return Found;
+}
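+
+// A small sketch of what the predicate above accepts (hand-checked against the
+// rules it implements, using plain C structs):
+//
+//   struct S1 { int : 0; double d; };   // unnamed bit-field ignored -> double
+//   struct S2 { double d[1]; };         // one-element array collapses -> double
+//   struct S3 { double d; int i; };     // two non-empty fields -> not single-element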
+
+static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
+ if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
+ !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
+ !Ty->isBlockPointerType())
+ return false;
+
+ uint64_t Size = Context.getTypeSize(Ty);
+ return Size == 32 || Size == 64;
+}
+
+/// canExpandIndirectArgument - Test whether an argument type which is to be
+/// passed indirectly (on the stack) would have the equivalent layout if it was
+/// expanded into separate arguments. If so, we prefer to do the latter to avoid
+/// inhibiting optimizations.
+///
+// FIXME: This predicate is missing many cases; currently it just follows
+// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
+// should probably make this smarter, or better yet make the LLVM backend
+// capable of handling it.
+static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
+ // We can only expand structure types.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ // We can only expand (C) structures.
+ //
+ // FIXME: This needs to be generalized to handle classes as well.
+ const RecordDecl *RD = RT->getDecl();
+ if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
+ return false;
+
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+
+ if (!is32Or64BitBasicType(FD->getType(), Context))
+ return false;
+
+ // FIXME: Reject bit-fields wholesale; there are two problems: we don't know
+ // how to expand them yet, and the predicate for telling if a bitfield still
+ // counts as "basic" is more complicated than what we were doing previously.
+ if (FD->isBitField())
+ return false;
+ }
+
+ return true;
+}
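+
+// As a sketch (hand-applied, together with the <= 4*32-bit size check done by
+// the caller below):
+//
+//   struct Expandable { int a; float b; void *p; };  // all 32/64-bit scalars
+//   struct NotExpandable { char c; int a; };         // char is 8 bits -> byval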
+
+namespace {
+/// DefaultABIInfo - The default implementation for ABI specific
+/// details. This implementation provides information which results in
+/// self-consistent and sensible LLVM IR generation, but does not
+/// conform to any particular ABI.
+class DefaultABIInfo : public ABIInfo {
+public:
+ DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
+};
+
+llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ return 0;
+}
+
+ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
+ if (isAggregateTypeForABI(Ty))
+ return ABIArgInfo::getIndirect(0);
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+//===----------------------------------------------------------------------===//
+// X86-32 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+/// X86_32ABIInfo - The X86-32 ABI information.
+class X86_32ABIInfo : public ABIInfo {
+ bool IsDarwinVectorABI;
+ bool IsSmallStructInRegABI;
+
+ static bool isRegisterSize(unsigned Size) {
+ return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
+ }
+
+ static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);
+
+ /// getIndirectResult - Given a source type \arg Ty, return a suitable result
+ /// such that the argument will be passed in memory.
+ ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const;
+
+public:
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+
+ X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p)
+ : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p) {}
+};
+
+class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p)
+ :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p)) {}
+
+ void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const;
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
+ // Darwin uses different dwarf register numbers for EH.
+ if (CGM.isTargetDarwin()) return 5;
+
+ return 4;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const;
+};
+
+}
+
+/// shouldReturnTypeInRegister - Determine if the given type should be
+/// returned in a register (for the Darwin ABI).
+bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
+ ASTContext &Context) {
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // Type must be register sized.
+ if (!isRegisterSize(Size))
+ return false;
+
+ if (Ty->isVectorType()) {
+ // 64- and 128-bit vectors inside structures are not returned in
+ // registers.
+ if (Size == 64 || Size == 128)
+ return false;
+
+ return true;
+ }
+
+ // If this is a builtin, pointer, enum, complex type, member pointer, or
+ // member function pointer it is ok.
+ if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
+ Ty->isAnyComplexType() || Ty->isEnumeralType() ||
+ Ty->isBlockPointerType() || Ty->isMemberPointerType())
+ return true;
+
+ // Arrays are treated like records.
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
+ return shouldReturnTypeInRegister(AT->getElementType(), Context);
+
+ // Otherwise, it must be a record type.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT) return false;
+
+ // FIXME: Traverse bases here too.
+
+ // Structure types are returned in a register if all of their fields
+ // would be returned in a register.
+ for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
+ e = RT->getDecl()->field_end(); i != e; ++i) {
+ const FieldDecl *FD = *i;
+
+ // Empty fields are ignored.
+ if (isEmptyField(Context, FD, true))
+ continue;
+
+ // Check fields recursively.
+ if (!shouldReturnTypeInRegister(FD->getType(), Context))
+ return false;
+ }
+
+ return true;
+}
+
+ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (const VectorType *VT = RetTy->getAs<VectorType>()) {
+ // On Darwin, some vectors are returned in registers.
+ if (IsDarwinVectorABI) {
+ uint64_t Size = getContext().getTypeSize(RetTy);
+
+ // 128-bit vectors are a special case; they are returned in
+ // registers and we need to make sure to pick a type the LLVM
+ // backend will like.
+ if (Size == 128)
+ return ABIArgInfo::getDirect(llvm::VectorType::get(
+ llvm::Type::getInt64Ty(getVMContext()), 2));
+
+ // Always return in register if it fits in a general purpose
+ // register, or if it is 64 bits and has a single element.
+ if ((Size == 8 || Size == 16 || Size == 32) ||
+ (Size == 64 && VT->getNumElements() == 1))
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
+
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ return ABIArgInfo::getDirect();
+ }
+
+ if (isAggregateTypeForABI(RetTy)) {
+ if (const RecordType *RT = RetTy->getAs<RecordType>()) {
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (hasNonTrivialDestructorOrCopyConstructor(RT))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ // Structures with flexible arrays are always indirect.
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ // If specified, structs and unions are always indirect.
+ if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
+ return ABIArgInfo::getIndirect(0);
+
+ // Classify "single element" structs as their element type.
+ if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) {
+ if (const BuiltinType *BT = SeltTy->getAs<BuiltinType>()) {
+ if (BT->isIntegerType()) {
+ // We need to use the size of the structure, padding
+ // bit-fields can adjust that to be larger than the single
+ // element type.
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ return ABIArgInfo::getDirect(
+ llvm::IntegerType::get(getVMContext(), (unsigned)Size));
+ }
+
+ if (BT->getKind() == BuiltinType::Float) {
+ assert(getContext().getTypeSize(RetTy) ==
+ getContext().getTypeSize(SeltTy) &&
+ "Unexpected single element structure size!");
+ return ABIArgInfo::getDirect(llvm::Type::getFloatTy(getVMContext()));
+ }
+
+ if (BT->getKind() == BuiltinType::Double) {
+ assert(getContext().getTypeSize(RetTy) ==
+ getContext().getTypeSize(SeltTy) &&
+ "Unexpected single element structure size!");
+ return ABIArgInfo::getDirect(llvm::Type::getDoubleTy(getVMContext()));
+ }
+ } else if (SeltTy->isPointerType()) {
+ // FIXME: It would be really nice if this could come out as the proper
+ // pointer type.
+ const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(getVMContext());
+ return ABIArgInfo::getDirect(PtrTy);
+ } else if (SeltTy->isVectorType()) {
+ // 64- and 128-bit vectors are never returned in a
+ // register when inside a structure.
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size == 64 || Size == 128)
+ return ABIArgInfo::getIndirect(0);
+
+ return classifyReturnType(QualType(SeltTy, 0));
+ }
+ }
+
+ // Small structures which are register sized are generally returned
+ // in a register.
+ if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext())) {
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
+ }
+
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
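+
+// A hedged sketch of how a few small C structs fall out of the logic above
+// when IsSmallStructInRegABI is set (hand-traced, not exhaustive):
+//
+//   struct P { short a, b; };   // 32 bits, register sized  -> direct as i32
+//   struct F { float f; };      // single-element struct    -> direct as float
+//   struct B { int a[4]; };     // 128 bits, not reg sized  -> indirect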
+
+ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
+ if (!ByVal)
+ return ABIArgInfo::getIndirect(0, false);
+
+ // Compute the byval alignment. We trust the back-end to honor the
+ // minimum ABI alignment for byval, to make cleaner IR.
+ const unsigned MinABIAlign = 4;
+ unsigned Align = getContext().getTypeAlign(Ty) / 8;
+ if (Align > MinABIAlign)
+ return ABIArgInfo::getIndirect(Align);
+ return ABIArgInfo::getIndirect(0);
+}
+
+ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
+ // FIXME: Set alignment on indirect arguments.
+ if (isAggregateTypeForABI(Ty)) {
+ // Structures with flexible arrays are always indirect.
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (hasNonTrivialDestructorOrCopyConstructor(RT))
+ return getIndirectResult(Ty, /*ByVal=*/false);
+
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return getIndirectResult(Ty);
+ }
+
+ // Ignore empty structs.
+ if (Ty->isStructureType() && getContext().getTypeSize(Ty) == 0)
+ return ABIArgInfo::getIgnore();
+
+ // Expand small (<= 128-bit) record types when we know that the stack layout
+ // of those arguments will match the struct. This is important because the
+ // LLVM backend isn't smart enough to remove byval, which inhibits many
+ // optimizations.
+ if (getContext().getTypeSize(Ty) <= 4*32 &&
+ canExpandIndirectArgument(Ty, getContext()))
+ return ABIArgInfo::getExpand();
+
+ return getIndirectResult(Ty);
+ }
+
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ // On Darwin, some vectors are passed in memory; we handle this by passing
+ // them as an i8/i16/i32/i64.
+ if (IsDarwinVectorABI) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if ((Size == 8 || Size == 16 || Size == 32) ||
+ (Size == 64 && VT->getNumElements() == 1))
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
+ }
+
+ return ABIArgInfo::getDirect();
+ }
+
+
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
+
+void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
+ // Get the LLVM function.
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+
+ // Now add the 'alignstack' attribute with a value of 16.
+ Fn->addFnAttr(llvm::Attribute::constructStackAlignmentFromInt(16));
+ }
+ }
+}
+
+bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+ llvm::LLVMContext &Context = CGF.getLLVMContext();
+
+ const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+
+ // 0-7 are the eight integer registers; the order is different
+ // on Darwin (for EH), but the range is the same.
+ // 8 is %eip.
+ AssignToArrayRange(Builder, Address, Four8, 0, 8);
+
+ if (CGF.CGM.isTargetDarwin()) {
+ // 12-16 are st(0..4). Not sure why we stop at 4.
+ // These have size 16, which is sizeof(long double) on
+ // platforms with 8-byte alignment for that type.
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
+ AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
+
+ } else {
+ // 9 is %eflags, which doesn't get a size on Darwin for some
+ // reason.
+ Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));
+
+ // 11-16 are st(0..5). Not sure why we stop at 5.
+ // These have size 12, which is sizeof(long double) on
+ // platforms with 4-byte alignment for that type.
+ llvm::Value *Twelve8 = llvm::ConstantInt::get(i8, 12);
+ AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
+ }
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// X86-64 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+
+namespace {
+/// X86_64ABIInfo - The X86_64 ABI information.
+class X86_64ABIInfo : public ABIInfo {
+ enum Class {
+ Integer = 0,
+ SSE,
+ SSEUp,
+ X87,
+ X87Up,
+ ComplexX87,
+ NoClass,
+ Memory
+ };
+
+ /// merge - Implement the X86_64 ABI merging algorithm.
+ ///
+ /// Merge an accumulating classification \arg Accum with a field
+ /// classification \arg Field.
+ ///
+ /// \param Accum - The accumulating classification. This should
+ /// always be either NoClass or the result of a previous merge
+ /// call. In addition, this should never be Memory (the caller
+ /// should just return Memory for the aggregate).
+ static Class merge(Class Accum, Class Field);
+
+ /// classify - Determine the x86_64 register classes in which the
+ /// given type T should be passed.
+ ///
+ /// \param Lo - The classification for the parts of the type
+ /// residing in the low word of the containing object.
+ ///
+ /// \param Hi - The classification for the parts of the type
+ /// residing in the high word of the containing object.
+ ///
+ /// \param OffsetBase - The bit offset of this type in the
+ /// containing object. Some parameters are classified differently
+ /// depending on whether they straddle an eightbyte boundary.
+ ///
+ /// If a word is unused its result will be NoClass; if a type should
+ /// be passed in Memory then at least the classification of \arg Lo
+ /// will be Memory.
+ ///
+ /// The \arg Lo class will be NoClass iff the argument is ignored.
+ ///
+ /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
+ /// also be ComplexX87.
+ void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;
+
+ const llvm::Type *Get16ByteVectorType(QualType Ty) const;
+ const llvm::Type *GetSSETypeAtOffset(const llvm::Type *IRType,
+ unsigned IROffset, QualType SourceTy,
+ unsigned SourceOffset) const;
+ const llvm::Type *GetINTEGERTypeAtOffset(const llvm::Type *IRType,
+ unsigned IROffset, QualType SourceTy,
+ unsigned SourceOffset) const;
+
+ /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
+ /// result such that the argument will be returned in memory.
+ ABIArgInfo getIndirectReturnResult(QualType Ty) const;
+
+ /// getIndirectResult - Given a source type \arg Ty, return a suitable result
+ /// such that the argument will be passed in memory.
+ ABIArgInfo getIndirectResult(QualType Ty) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+
+ ABIArgInfo classifyArgumentType(QualType Ty, unsigned &neededInt,
+ unsigned &neededSSE) const;
+
+public:
+ X86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
+class WinX86_64ABIInfo : public X86_64ABIInfo {
+public:
+ WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : X86_64ABIInfo(CGT) {}
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new X86_64ABIInfo(CGT)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
+ return 7;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+ llvm::LLVMContext &Context = CGF.getLLVMContext();
+
+ const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+
+ // 0-15 are the 16 integer registers.
+ // 16 is %rip.
+ AssignToArrayRange(Builder, Address, Eight8, 0, 16);
+
+ return false;
+ }
+};
+
+class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
+ return 7;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+ llvm::LLVMContext &Context = CGF.getLLVMContext();
+
+ const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+
+ // 0-15 are the 16 integer registers.
+ // 16 is %rip.
+ AssignToArrayRange(Builder, Address, Eight8, 0, 16);
+
+ return false;
+ }
+};
+
+}
+
+X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
+ // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
+ // classified recursively so that always two fields are
+ // considered. The resulting class is calculated according to
+ // the classes of the fields in the eightbyte:
+ //
+ // (a) If both classes are equal, this is the resulting class.
+ //
+ // (b) If one of the classes is NO_CLASS, the resulting class is
+ // the other class.
+ //
+ // (c) If one of the classes is MEMORY, the result is the MEMORY
+ // class.
+ //
+ // (d) If one of the classes is INTEGER, the result is the
+ // INTEGER.
+ //
+ // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
+ // MEMORY is used as class.
+ //
+ // (f) Otherwise class SSE is used.
+
+ // Accum should never be memory (we should have returned) or
+ // ComplexX87 (because this cannot be passed in a structure).
+ assert((Accum != Memory && Accum != ComplexX87) &&
+ "Invalid accumulated classification during merge.");
+ if (Accum == Field || Field == NoClass)
+ return Accum;
+ if (Field == Memory)
+ return Memory;
+ if (Accum == NoClass)
+ return Field;
+ if (Accum == Integer || Field == Integer)
+ return Integer;
+ if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
+ Accum == X87 || Accum == X87Up)
+ return Memory;
+ return SSE;
+}
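+
+// A worked illustration of the merge rules, hand-applied per the SysV x86-64
+// ABI (not output of this code):
+//
+//   struct { int a; float f; double d; };
+//     eightbyte 0: int (INTEGER) merged with float (SSE) -> INTEGER (rule d)
+//     eightbyte 1: double                                 -> SSE
+//   so Lo = Integer, Hi = SSE: one GPR plus one XMM register.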
+
+void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
+ Class &Lo, Class &Hi) const {
+ // FIXME: This code can be simplified by introducing a simple value class for
+ // Class pairs with appropriate constructor methods for the various
+ // situations.
+
+ // FIXME: Some of the split computations are wrong; unaligned vectors
+ // shouldn't be passed in registers for example, so there is no chance they
+ // can straddle an eightbyte. Verify & simplify.
+
+ Lo = Hi = NoClass;
+
+ Class &Current = OffsetBase < 64 ? Lo : Hi;
+ Current = Memory;
+
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ BuiltinType::Kind k = BT->getKind();
+
+ if (k == BuiltinType::Void) {
+ Current = NoClass;
+ } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
+ Lo = Integer;
+ Hi = Integer;
+ } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
+ Current = Integer;
+ } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
+ Current = SSE;
+ } else if (k == BuiltinType::LongDouble) {
+ Lo = X87;
+ Hi = X87Up;
+ }
+ // FIXME: _Decimal32 and _Decimal64 are SSE.
+ // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
+ return;
+ }
+
+ if (const EnumType *ET = Ty->getAs<EnumType>()) {
+ // Classify the underlying integer type.
+ classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
+ return;
+ }
+
+ if (Ty->hasPointerRepresentation()) {
+ Current = Integer;
+ return;
+ }
+
+ if (Ty->isMemberPointerType()) {
+ if (Ty->isMemberFunctionPointerType())
+ Lo = Hi = Integer;
+ else
+ Current = Integer;
+ return;
+ }
+
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ uint64_t Size = getContext().getTypeSize(VT);
+ if (Size == 32) {
+ // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
+ // float> as integer.
+ Current = Integer;
+
+ // If this type crosses an eightbyte boundary, it should be
+ // split.
+ uint64_t EB_Real = (OffsetBase) / 64;
+ uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
+ if (EB_Real != EB_Imag)
+ Hi = Lo;
+ } else if (Size == 64) {
+ // gcc passes <1 x double> in memory. :(
+ if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
+ return;
+
+ // gcc passes <1 x long long> as INTEGER.
+ if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
+ VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
+ VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
+ VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
+ Current = Integer;
+ else
+ Current = SSE;
+
+ // If this type crosses an eightbyte boundary, it should be
+ // split.
+ if (OffsetBase && OffsetBase != 64)
+ Hi = Lo;
+ } else if (Size == 128) {
+ Lo = SSE;
+ Hi = SSEUp;
+ }
+ return;
+ }
+
+ if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ QualType ET = getContext().getCanonicalType(CT->getElementType());
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (ET->isIntegralOrEnumerationType()) {
+ if (Size <= 64)
+ Current = Integer;
+ else if (Size <= 128)
+ Lo = Hi = Integer;
+ } else if (ET == getContext().FloatTy)
+ Current = SSE;
+ else if (ET == getContext().DoubleTy)
+ Lo = Hi = SSE;
+ else if (ET == getContext().LongDoubleTy)
+ Current = ComplexX87;
+
+ // If this complex type crosses an eightbyte boundary then it
+ // should be split.
+ uint64_t EB_Real = (OffsetBase) / 64;
+ uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
+ if (Hi == NoClass && EB_Real != EB_Imag)
+ Hi = Lo;
+
+ return;
+ }
+
+ if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
+ // Arrays are treated like structures.
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+ // than two eightbytes, ..., it has class MEMORY.
+ if (Size > 128)
+ return;
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
+ // fields, it has class MEMORY.
+ //
+ // Only need to check alignment of array base.
+ if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
+ return;
+
+ // Otherwise implement simplified merge. We could be smarter about
+ // this, but it isn't worth it and would be harder to verify.
+ Current = NoClass;
+ uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
+ uint64_t ArraySize = AT->getSize().getZExtValue();
+ for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
+ Class FieldLo, FieldHi;
+ classify(AT->getElementType(), Offset, FieldLo, FieldHi);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+ // Do post merger cleanup (see below). Only case we worry about is Memory.
+ if (Hi == Memory)
+ Lo = Memory;
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
+ return;
+ }
+
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+ // than two eightbytes, ..., it has class MEMORY.
+ if (Size > 128)
+ return;
+
+ // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
+ // copy constructor or a non-trivial destructor, it is passed by invisible
+ // reference.
+ if (hasNonTrivialDestructorOrCopyConstructor(RT))
+ return;
+
+ const RecordDecl *RD = RT->getDecl();
+
+ // Assume variable sized types are passed in memory.
+ if (RD->hasFlexibleArrayMember())
+ return;
+
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+
+ // Reset Lo class, this will be recomputed.
+ Current = NoClass;
+
+ // If this is a C++ record, classify the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i) {
+ assert(!i->isVirtual() && !i->getType()->isDependentType() &&
+ "Unexpected base class!");
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+
+ // Classify this field.
+ //
+ // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
+ // single eightbyte, each is classified separately. Each eightbyte gets
+ // initialized to class NO_CLASS.
+ Class FieldLo, FieldHi;
+ uint64_t Offset = OffsetBase + Layout.getBaseClassOffset(Base);
+ classify(i->getType(), Offset, FieldLo, FieldHi);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+ }
+
+ // Classify the fields one at a time, merging the results.
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+ bool BitField = i->isBitField();
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
+ // fields, it has class MEMORY.
+ //
+ // Note, skip this test for bit-fields, see below.
+ if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
+ Lo = Memory;
+ return;
+ }
+
+ // Classify this field.
+ //
+ // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
+ // exceeds a single eightbyte, each is classified
+ // separately. Each eightbyte gets initialized to class
+ // NO_CLASS.
+ Class FieldLo, FieldHi;
+
+ // Bit-fields require special handling, they do not force the
+ // structure to be passed in memory even if unaligned, and
+ // therefore they can straddle an eightbyte.
+ if (BitField) {
+ // Ignore padding bit-fields.
+ if (i->isUnnamedBitfield())
+ continue;
+
+ uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+ uint64_t Size =
+ i->getBitWidth()->EvaluateAsInt(getContext()).getZExtValue();
+
+ uint64_t EB_Lo = Offset / 64;
+ uint64_t EB_Hi = (Offset + Size - 1) / 64;
+ FieldLo = FieldHi = NoClass;
+ if (EB_Lo) {
+ assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
+ FieldLo = NoClass;
+ FieldHi = Integer;
+ } else {
+ FieldLo = Integer;
+ FieldHi = EB_Hi ? Integer : NoClass;
+ }
+ } else
+ classify(i->getType(), Offset, FieldLo, FieldHi);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+ // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
+ //
+ // (a) If one of the classes is MEMORY, the whole argument is
+ // passed in memory.
+ //
+ // (b) If SSEUP is not preceded by SSE, it is converted to SSE.
+
+ // The first of these conditions is guaranteed by how we implement
+ // the merge (just bail).
+ //
+ // The second condition occurs in the case of unions; for example
+ // union { _Complex double; unsigned; }.
+ if (Hi == Memory)
+ Lo = Memory;
+ if (Hi == SSEUp && Lo != SSE)
+ Hi = SSE;
+ }
+}
+
+ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
+ // If this is a scalar LLVM value then assume LLVM will pass it in the right
+ // place naturally.
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ return ABIArgInfo::getIndirect(0);
+}
+
+ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const {
+ // If this is a scalar LLVM value then assume LLVM will pass it in the right
+ // place naturally.
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ // Compute the byval alignment. We trust the back-end to honor the
+ // minimum ABI alignment for byval, to make cleaner IR.
+ const unsigned MinABIAlign = 8;
+ unsigned Align = getContext().getTypeAlign(Ty) / 8;
+ if (Align > MinABIAlign)
+ return ABIArgInfo::getIndirect(Align);
+ return ABIArgInfo::getIndirect(0);
+}
+
+/// Get16ByteVectorType - The ABI specifies that a value should be passed in a
+/// full vector XMM register. Pick an LLVM IR type that will be passed as a
+/// vector register.
+const llvm::Type *X86_64ABIInfo::Get16ByteVectorType(QualType Ty) const {
+ const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
+
+ // Wrapper structs that just contain vectors are passed just like vectors;
+ // strip them off if present.
+ const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
+ while (STy && STy->getNumElements() == 1) {
+ IRType = STy->getElementType(0);
+ STy = dyn_cast<llvm::StructType>(IRType);
+ }
+
+ // If the preferred type is a 16-byte vector, prefer to pass it.
+ if (const llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)) {
+ const llvm::Type *EltTy = VT->getElementType();
+ if (VT->getBitWidth() == 128 &&
+ (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
+ EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
+ EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
+ EltTy->isIntegerTy(128)))
+ return VT;
+ }
+
+ return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
+}
+
+/// BitsContainNoUserData - Return true if the specified [start,end) bit range
+/// is known to either be off the end of the specified type or be in
+/// alignment padding. The user type specified is known to be at most 128 bits
+/// in size, and have passed through X86_64ABIInfo::classify with a successful
+/// classification that put one of the two halves in the INTEGER class.
+///
+/// It is conservatively correct to return false.
+static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
+ unsigned EndBit, ASTContext &Context) {
+ // If the bytes being queried are off the end of the type, there is no user
+ // data hiding here. This handles analysis of builtins, vectors and other
+ // types that don't contain interesting padding.
+ unsigned TySize = (unsigned)Context.getTypeSize(Ty);
+ if (TySize <= StartBit)
+ return true;
+
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+ unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
+ unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
+
+ // Check each element to see if the element overlaps with the queried range.
+ for (unsigned i = 0; i != NumElts; ++i) {
+ // If the element is after the span we care about, then we're done.
+ unsigned EltOffset = i*EltSize;
+ if (EltOffset >= EndBit) break;
+
+ unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset : 0;
+ if (!BitsContainNoUserData(AT->getElementType(), EltStart,
+ EndBit-EltOffset, Context))
+ return false;
+ }
+ // If it overlaps no elements, then it is safe to process as padding.
+ return true;
+ }
+
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i) {
+ assert(!i->isVirtual() && !i->getType()->isDependentType() &&
+ "Unexpected base class!");
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+
+ // If the base is after the span we care about, ignore it.
+ unsigned BaseOffset = (unsigned)Layout.getBaseClassOffset(Base);
+ if (BaseOffset >= EndBit) continue;
+
+ unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset : 0;
+ if (!BitsContainNoUserData(i->getType(), BaseStart,
+ EndBit-BaseOffset, Context))
+ return false;
+ }
+ }
+
+ // Verify that no field has data that overlaps the region of interest. Yes,
+ // this could be sped up a lot by being smarter about queried fields;
+ // however, we're only looking at structs up to 16 bytes, so we don't care
+ // much.
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
+
+ // If we found a field after the region we care about, then we're done.
+ if (FieldOffset >= EndBit) break;
+
+ unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset : 0;
+ if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
+ Context))
+ return false;
+ }
+
+ // If nothing in this record overlapped the area of interest, then we're
+ // clean.
+ return true;
+ }
+
+ return false;
+}
+
+/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
+/// float member at the specified offset. For example, {int,{float}} has a
+/// float at offset 4. It is conservatively correct for this routine to return
+/// false.
+static bool ContainsFloatAtOffset(const llvm::Type *IRType, unsigned IROffset,
+ const llvm::TargetData &TD) {
+ // Base case if we find a float.
+ if (IROffset == 0 && IRType->isFloatTy())
+ return true;
+
+ // If this is a struct, recurse into the field at the specified offset.
+ if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
+ const llvm::StructLayout *SL = TD.getStructLayout(STy);
+ unsigned Elt = SL->getElementContainingOffset(IROffset);
+ IROffset -= SL->getElementOffset(Elt);
+ return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
+ }
+
+ // If this is an array, recurse into the field at the specified offset.
+ if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
+ const llvm::Type *EltTy = ATy->getElementType();
+ unsigned EltSize = TD.getTypeAllocSize(EltTy);
+ IROffset -= IROffset/EltSize*EltSize;
+ return ContainsFloatAtOffset(EltTy, IROffset, TD);
+ }
+
+ return false;
+}
+
+
+/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
+/// low 8 bytes of an XMM register, corresponding to the SSE class.
+const llvm::Type *X86_64ABIInfo::
+GetSSETypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
+ QualType SourceTy, unsigned SourceOffset) const {
+ // The only three choices we have are double, <2 x float>, or float. We
+ // pass as float if the last 4 bytes are just padding. This happens for
+ // structs that contain 3 floats.
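+ // For example, for struct { float x, y, z; } the high eightbyte contains
+ // only 'z' followed by 4 bytes of tail padding, so it is passed as a float.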
+ if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
+ SourceOffset*8+64, getContext()))
+ return llvm::Type::getFloatTy(getVMContext());
+
+ // We want to pass as <2 x float> if the LLVM IR type contains a float at
+ // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
+ // case.
+ if (ContainsFloatAtOffset(IRType, IROffset, getTargetData()) &&
+ ContainsFloatAtOffset(IRType, IROffset+4, getTargetData()))
+ return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
+
+ return llvm::Type::getDoubleTy(getVMContext());
+}
+
+
+/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
+/// an 8-byte GPR. This means that we either have a scalar or we are talking
+/// about the high or low part of an up-to-16-byte struct. This routine picks
+/// the best LLVM IR type to represent this, which may be i64 or may be anything
+/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
+/// etc).
+///
+/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
+/// the source type. IROffset is an offset in bytes into the LLVM IR type that
+/// the 8-byte value references. PrefType may be null.
+///
+/// SourceTy is the source level type for the entire argument. SourceOffset is
+/// an offset into this that we're processing (which is always either 0 or 8).
+///
+const llvm::Type *X86_64ABIInfo::
+GetINTEGERTypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
+ QualType SourceTy, unsigned SourceOffset) const {
+ // If we're dealing with an un-offset LLVM IR type, then it means that we're
+ // returning an 8-byte unit starting with it. See if we can safely use it.
+ if (IROffset == 0) {
+ // Pointers and int64s always fill the 8-byte unit.
+ if (isa<llvm::PointerType>(IRType) || IRType->isIntegerTy(64))
+ return IRType;
+
+ // If we have a 1/2/4-byte integer, we can use it only if the rest of the
+ // goodness in the source type is just tail padding. This is allowed to
+ // kick in for struct {double,int} on the int, but not on
+ // struct{double,int,int} because we wouldn't return the second int. We
+ // have to do this analysis on the source type because we can't depend on
+ // unions being lowered a specific way etc.
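+ //
+ // Concretely: for struct { double d; int i; } at SourceOffset 8 the bits
+ // past 'i' are tail padding, so the i32 is used as-is; for
+ // struct { double d; int i; int j; } those bits hold 'j', so we fall
+ // through and end up using an i64 below.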
+ if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
+ IRType->isIntegerTy(32)) {
+ unsigned BitWidth = cast<llvm::IntegerType>(IRType)->getBitWidth();
+
+ if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
+ SourceOffset*8+64, getContext()))
+ return IRType;
+ }
+ }
+
+ if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
+ // If this is a struct, recurse into the field at the specified offset.
+ const llvm::StructLayout *SL = getTargetData().getStructLayout(STy);
+ if (IROffset < SL->getSizeInBytes()) {
+ unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
+ IROffset -= SL->getElementOffset(FieldIdx);
+
+ return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
+ SourceTy, SourceOffset);
+ }
+ }
+
+ if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
+ const llvm::Type *EltTy = ATy->getElementType();
+ unsigned EltSize = getTargetData().getTypeAllocSize(EltTy);
+ unsigned EltOffset = IROffset/EltSize*EltSize;
+ return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
+ SourceOffset);
+ }
+
+ // Okay, we don't have any better idea of what to pass, so we pass this in an
+ // integer register that isn't too big to fit the rest of the struct.
+ unsigned TySizeInBytes =
+ (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
+
+ assert(TySizeInBytes != SourceOffset && "Empty field?");
+
+ // It is always safe to classify this as an integer type up to i64 that
+ // isn't larger than the structure.
+ return llvm::IntegerType::get(getVMContext(),
+ std::min(TySizeInBytes-SourceOffset, 8U)*8);
+}
+
+
+/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
+/// be used as elements of a two register pair to pass or return, return a
+/// first class aggregate to represent them. For example, if the low part of
+/// a by-value argument should be passed as i32* and the high part as float,
+/// return {i32*, float}.
+static const llvm::Type *
+GetX86_64ByValArgumentPair(const llvm::Type *Lo, const llvm::Type *Hi,
+ const llvm::TargetData &TD) {
+ // In order to correctly satisfy the ABI, we need the high part to start
+ // at offset 8. If the high and low parts we inferred are both 4-byte types
+ // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
+ // the second element at offset 8. Check for this:
+ unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
+ unsigned HiAlign = TD.getABITypeAlignment(Hi);
+ unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign);
+ assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
+
+ // To handle this, we have to increase the size of the low part so that the
+ // second element will start at an 8 byte offset. We can't increase the size
+ // of the second element because it might make us access off the end of the
+ // struct.
+ if (HiStart != 8) {
+ // There are only two sorts of types the ABI generation code can produce for
+ // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
+ // Promote these to a larger type.
+ if (Lo->isFloatTy())
+ Lo = llvm::Type::getDoubleTy(Lo->getContext());
+ else {
+ assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
+ Lo = llvm::Type::getInt64Ty(Lo->getContext());
+ }
+ }
+
+ const llvm::StructType *Result =
+ llvm::StructType::get(Lo->getContext(), Lo, Hi, NULL);
+
+
+ // Verify that the second element is at an 8-byte offset.
+ assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
+ "Invalid x86-64 argument pair!");
+ return Result;
+}
+
+ABIArgInfo X86_64ABIInfo::
+classifyReturnType(QualType RetTy) const {
+ // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
+ // classification algorithm.
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(RetTy, 0, Lo, Hi);
+
+ // Check some invariants.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ const llvm::Type *ResType = 0;
+ switch (Lo) {
+ case NoClass:
+ if (Hi == NoClass)
+ return ABIArgInfo::getIgnore();
+ // If the low part is just padding, it takes no register; leave ResType
+ // null.
+ assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
+ "Unknown missing lo part");
+ break;
+
+ case SSEUp:
+ case X87Up:
+ assert(0 && "Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
+ // hidden argument.
+ case Memory:
+ return getIndirectReturnResult(RetTy);
+
+ // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
+ // available register of the sequence %rax, %rdx is used.
+ case Integer:
+ ResType = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 0,
+ RetTy, 0);
+
+ // If we have a sign or zero extended integer, make sure to return Extend
+ // so that the parameter gets the right LLVM IR attributes.
+ if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ if (RetTy->isIntegralOrEnumerationType() &&
+ RetTy->isPromotableIntegerType())
+ return ABIArgInfo::getExtend();
+ }
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
+ // available SSE register of the sequence %xmm0, %xmm1 is used.
+ case SSE:
+ ResType = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 0, RetTy, 0);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
+ // returned on the X87 stack in %st0 as 80-bit x87 number.
+ case X87:
+ ResType = llvm::Type::getX86_FP80Ty(getVMContext());
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
+ // part of the value is returned in %st0 and the imaginary part in
+ // %st1.
+ case ComplexX87:
+ assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
+ ResType = llvm::StructType::get(getVMContext(),
+ llvm::Type::getX86_FP80Ty(getVMContext()),
+ llvm::Type::getX86_FP80Ty(getVMContext()),
+ NULL);
+ break;
+ }
+
+ const llvm::Type *HighPart = 0;
+ switch (Hi) {
+ // Memory was handled previously and X87 should
+ // never occur as a hi class.
+ case Memory:
+ case X87:
+ assert(0 && "Invalid classification for hi word.");
+
+ case ComplexX87: // Previously handled.
+ case NoClass:
+ break;
+
+ case Integer:
+ HighPart = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy),
+ 8, RetTy, 8);
+ if (Lo == NoClass) // Return HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ break;
+ case SSE:
+ HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
+ // is passed in the upper half of the last used SSE register.
+ //
+ // SSEUP should always be preceded by SSE; just widen.
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification.");
+ ResType = Get16ByteVectorType(RetTy);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
+ // returned together with the previous X87 value in %st0.
+ case X87Up:
+ // If X87Up is preceded by X87, we don't need to do
+ // anything. However, in some cases with unions it may not be
+ // preceded by X87. In such situations we follow gcc and pass the
+ // extra bits in an SSE reg.
+ if (Lo != X87) {
+ HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy),
+ 8, RetTy, 8);
+ if (Lo == NoClass) // Return HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ }
+ break;
+ }
+
+ // If a high part was specified, merge it together with the low part. It is
+ // known to be passed in the high eightbyte of the result. We do this by
+ // forming a first class struct aggregate with the high and low part: {low, high}
+ if (HighPart)
+ ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
+
+ return ABIArgInfo::getDirect(ResType);
+}
+
+ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
+ unsigned &neededSSE) const {
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(Ty, 0, Lo, Hi);
+
+ // Check some invariants.
+ // FIXME: Enforce these by construction.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ neededInt = 0;
+ neededSSE = 0;
+ const llvm::Type *ResType = 0;
+ switch (Lo) {
+ case NoClass:
+ if (Hi == NoClass)
+ return ABIArgInfo::getIgnore();
+ // If the low part is just padding, it takes no register; leave ResType
+ // null.
+ assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
+ "Unknown missing lo part");
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
+ // on the stack.
+ case Memory:
+
+ // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
+ // COMPLEX_X87, it is passed in memory.
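+ //
+ // For example, a 'long double' argument (X87/X87Up) or a
+ // '_Complex long double' argument (COMPLEX_X87) ends up here and is
+ // passed on the stack.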
+ case X87:
+ case ComplexX87:
+ return getIndirectResult(Ty);
+
+ case SSEUp:
+ case X87Up:
+ assert(0 && "Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
+ // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
+ // and %r9 is used.
+ case Integer:
+ ++neededInt;
+
+ // Pick an 8-byte type based on the preferred type.
+ ResType = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 0, Ty, 0);
+
+ // If we have a sign or zero extended integer, make sure to return Extend
+ // so that the parameter gets the right LLVM IR attributes.
+ if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ if (Ty->isIntegralOrEnumerationType() &&
+ Ty->isPromotableIntegerType())
+ return ABIArgInfo::getExtend();
+ }
+
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
+ // available SSE register is used, the registers are taken in the
+ // order from %xmm0 to %xmm7.
+ case SSE:
+ ++neededSSE;
+ ResType = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(Ty), 0, Ty, 0);
+ break;
+ }
+
+ const llvm::Type *HighPart = 0;
+ switch (Hi) {
+ // Memory was handled previously; ComplexX87 and X87 should
+ // never occur as hi classes, and X87Up must be preceded by X87,
+ // which is passed in memory.
+ case Memory:
+ case X87:
+ case ComplexX87:
+ assert(0 && "Invalid classification for hi word.");
+ break;
+
+ case NoClass: break;
+
+ case Integer:
+ ++neededInt;
+ // Pick an 8-byte type based on the preferred type.
+ HighPart = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8);
+
+ if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ break;
+
+ // X87Up generally doesn't occur here (long double is passed in
+ // memory), except in situations involving unions.
+ case X87Up:
+ case SSE:
+ HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8);
+
+ if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+
+ ++neededSSE;
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
+ // eightbyte is passed in the upper half of the last used SSE
+ // register. This only happens when 128-bit vectors are passed.
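+ //
+ // For example, an argument of type __m128 (<4 x float>) classifies as
+ // SSE + SSEUP and is passed in a single XMM register.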
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification");
+ ResType = Get16ByteVectorType(Ty);
+ break;
+ }
+
+ // If a high part was specified, merge it together with the low part. It is
+ // known to be passed in the high eightbyte of the result. We do this by
+ // forming a first class struct aggregate with the high and low part: {low, high}
+ if (HighPart)
+ ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
+
+ return ABIArgInfo::getDirect(ResType);
+}
+
+void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+ // Keep track of the number of assigned registers.
+ unsigned freeIntRegs = 6, freeSSERegs = 8;
+
+ // If the return value is indirect, then the hidden argument is consuming one
+ // integer register.
+ if (FI.getReturnInfo().isIndirect())
+ --freeIntRegs;
+
+ // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
+ // get assigned (in left-to-right order) for passing as follows...
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it) {
+ unsigned neededInt, neededSSE;
+ it->info = classifyArgumentType(it->type, neededInt, neededSSE);
+
+ // AMD64-ABI 3.2.3p3: If there are no registers available for any
+ // eightbyte of an argument, the whole argument is passed on the
+ // stack. If registers have already been assigned for some
+ // eightbytes of such an argument, the assignments get reverted.
+ if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
+ freeIntRegs -= neededInt;
+ freeSSERegs -= neededSSE;
+ } else {
+ it->info = getIndirectResult(it->type);
+ }
+ }
+}
+
+static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
+ QualType Ty,
+ CodeGenFunction &CGF) {
+ llvm::Value *overflow_arg_area_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
+ llvm::Value *overflow_arg_area =
+ CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
+
+ // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
+ // byte boundary if alignment needed by type exceeds 8 byte boundary.
+ uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
+ if (Align > 8) {
+ // Note that we follow the ABI & gcc here, even though the type
+ // could in theory have an alignment greater than 16. This case
+ // shouldn't ever matter in practice.
+
+ // overflow_arg_area = (overflow_arg_area + 15) & ~15;
+ llvm::Value *Offset =
+ llvm::ConstantInt::get(CGF.Int32Ty, 15);
+ overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
+ llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
+ CGF.Int64Ty);
+ llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~15LL);
+ overflow_arg_area =
+ CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
+ overflow_arg_area->getType(),
+ "overflow_arg_area.align");
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
+ const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *Res =
+ CGF.Builder.CreateBitCast(overflow_arg_area,
+ llvm::PointerType::getUnqual(LTy));
+
+ // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
+ // l->overflow_arg_area + sizeof(type).
+ // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
+ // an 8 byte boundary.
+
+ uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
+ llvm::Value *Offset =
+ llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
+ overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
+ "overflow_arg_area.next");
+ CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
+
+ // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
+ return Res;
+}
+
+llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ llvm::LLVMContext &VMContext = CGF.getLLVMContext();
+
+ // Assume that va_list type is correct; should be pointer to LLVM type:
+ // struct {
+ // i32 gp_offset;
+ // i32 fp_offset;
+ // i8* overflow_arg_area;
+ // i8* reg_save_area;
+ // };
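+ //
+ // In the register save area the six integer argument registers occupy
+ // bytes 0-47 and the eight XMM argument registers occupy bytes 48-175
+ // (16 bytes each); gp_offset and fp_offset index into that area, which
+ // is where the limits of 48 and 176 used below come from.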
+ unsigned neededInt, neededSSE;
+
+ Ty = CGF.getContext().getCanonicalType(Ty);
+ ABIArgInfo AI = classifyArgumentType(Ty, neededInt, neededSSE);
+
+ // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
+ // in the registers. If not go to step 7.
+ if (!neededInt && !neededSSE)
+ return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+
+ // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
+ // general purpose registers needed to pass type and num_fp to hold
+ // the number of floating point registers needed.
+
+ // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
+ // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
+ // l->fp_offset > 304 - num_fp * 16 go to step 7.
+ //
+ // NOTE: 304 is a typo; there are (6 * 8 + 8 * 16) = 176 bytes of
+ // register save space.
+
+ llvm::Value *InRegs = 0;
+ llvm::Value *gp_offset_p = 0, *gp_offset = 0;
+ llvm::Value *fp_offset_p = 0, *fp_offset = 0;
+ if (neededInt) {
+ gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
+ gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
+ InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
+ InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
+ }
+
+ if (neededSSE) {
+ fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
+ fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
+ llvm::Value *FitsInFP =
+ llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
+ FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
+ InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
+ }
+
+ llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+ llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+ CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
+
+ // Emit code to load the value if it was passed in registers.
+
+ CGF.EmitBlock(InRegBlock);
+
+ // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
+ // an offset of l->gp_offset and/or l->fp_offset. This may require
+ // copying to a temporary location in case the parameter is passed
+ // in different register classes or requires an alignment greater
+ // than 8 for general purpose registers and 16 for XMM registers.
+ //
+ // FIXME: This really results in shameful code when we end up needing to
+ // collect arguments from different places; often what should result in a
+ // simple assembling of a structure from scattered addresses has many more
+ // loads than necessary. Can we clean this up?
+ const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *RegAddr =
+ CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
+ "reg_save_area");
+ if (neededInt && neededSSE) {
+ // FIXME: Cleanup.
+ assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
+ const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
+ llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
+ assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
+ const llvm::Type *TyLo = ST->getElementType(0);
+ const llvm::Type *TyHi = ST->getElementType(1);
+ assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
+ "Unexpected ABI info for mixed regs");
+ const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
+ const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
+ llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
+ llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr;
+ llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr;
+ llvm::Value *V =
+ CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+
+ RegAddr = CGF.Builder.CreateBitCast(Tmp,
+ llvm::PointerType::getUnqual(LTy));
+ } else if (neededInt) {
+ RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
+ RegAddr = CGF.Builder.CreateBitCast(RegAddr,
+ llvm::PointerType::getUnqual(LTy));
+ } else if (neededSSE == 1) {
+ RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ RegAddr = CGF.Builder.CreateBitCast(RegAddr,
+ llvm::PointerType::getUnqual(LTy));
+ } else {
+ assert(neededSSE == 2 && "Invalid number of needed registers!");
+ // SSE registers are spaced 16 bytes apart in the register save
+ // area, so we need to collect the two eightbytes together.
+ llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
+ const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
+ const llvm::Type *DblPtrTy =
+ llvm::PointerType::getUnqual(DoubleTy);
+ const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
+ DoubleTy, NULL);
+ llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
+ DblPtrTy));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
+ DblPtrTy));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+ RegAddr = CGF.Builder.CreateBitCast(Tmp,
+ llvm::PointerType::getUnqual(LTy));
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 5. Set:
+ // l->gp_offset = l->gp_offset + num_gp * 8
+ // l->fp_offset = l->fp_offset + num_fp * 16.
+ if (neededInt) {
+ llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
+ gp_offset_p);
+ }
+ if (neededSSE) {
+ llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
+ fp_offset_p);
+ }
+ CGF.EmitBranch(ContBlock);
+
+ // Emit code to load the value if it was passed in memory.
+
+ CGF.EmitBlock(InMemBlock);
+ llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+
+ // Return the appropriate result.
+
+ CGF.EmitBlock(ContBlock);
+ llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
+ "vaarg.addr");
+ ResAddr->reserveOperandSpace(2);
+ ResAddr->addIncoming(RegAddr, InRegBlock);
+ ResAddr->addIncoming(MemAddr, InMemBlock);
+ return ResAddr;
+}
+
+llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
+
+//===----------------------------------------------------------------------===//
+// PIC16 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class PIC16ABIInfo : public ABIInfo {
+public:
+ PIC16ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class PIC16TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ PIC16TargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new PIC16ABIInfo(CGT)) {}
+};
+
+}
+
+ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType()) {
+ return ABIArgInfo::getIgnore();
+ } else {
+ return ABIArgInfo::getDirect();
+ }
+}
+
+ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty) const {
+ return ABIArgInfo::getDirect();
+}
+
+llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset = CGF.getContext().getTypeSize(Ty) / 8;
+
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
+
+
+// PowerPC-32
+
+namespace {
+class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
+public:
+ PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ // This is recovered from gcc output.
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const;
+};
+
+}
+
+bool
+PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ // This is calculated from the LLVM and GCC tables and verified
+ // against gcc output. AFAIK all ABIs use the same encoding.
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+ llvm::LLVMContext &Context = CGF.getLLVMContext();
+
+ const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
+
+ // 0-31: r0-31, the 4-byte general-purpose registers
+ AssignToArrayRange(Builder, Address, Four8, 0, 31);
+
+ // 32-63: fp0-31, the 8-byte floating-point registers
+ AssignToArrayRange(Builder, Address, Eight8, 32, 63);
+
+ // 64-76 are various 4-byte special-purpose registers:
+ // 64: mq
+ // 65: lr
+ // 66: ctr
+ // 67: ap
+ // 68-75 cr0-7
+ // 76: xer
+ AssignToArrayRange(Builder, Address, Four8, 64, 76);
+
+ // 77-108: v0-31, the 16-byte vector registers
+ AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
+
+ // 109: vrsave
+ // 110: vscr
+ // 111: spe_acc
+ // 112: spefscr
+ // 113: sfp
+ AssignToArrayRange(Builder, Address, Four8, 109, 113);
+
+ return false;
+}
+
+
+//===----------------------------------------------------------------------===//
+// ARM ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class ARMABIInfo : public ABIInfo {
+public:
+ enum ABIKind {
+ APCS = 0,
+ AAPCS = 1,
+ AAPCS_VFP
+ };
+
+private:
+ ABIKind Kind;
+
+public:
+ ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {}
+
+private:
+ ABIKind getABIKind() const { return Kind; }
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
+ :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ return 13;
+ }
+};
+
+}
+
+void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+
+ const llvm::Triple &Triple(getContext().Target.getTriple());
+ llvm::CallingConv::ID DefaultCC;
+ if (Triple.getEnvironmentName() == "gnueabi" ||
+ Triple.getEnvironmentName() == "eabi")
+ DefaultCC = llvm::CallingConv::ARM_AAPCS;
+ else
+ DefaultCC = llvm::CallingConv::ARM_APCS;
+
+ switch (getABIKind()) {
+ case APCS:
+ if (DefaultCC != llvm::CallingConv::ARM_APCS)
+ FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
+ break;
+
+ case AAPCS:
+ if (DefaultCC != llvm::CallingConv::ARM_AAPCS)
+ FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
+ break;
+
+ case AAPCS_VFP:
+ FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
+ break;
+ }
+}
+
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ // Ignore empty records.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ // FIXME: This is kind of nasty... but there isn't much choice because the ARM
+ // backend doesn't support byval.
+ // FIXME: This doesn't handle alignment > 64 bits.
+ const llvm::Type* ElemTy;
+ unsigned SizeRegs;
+ if (getContext().getTypeAlign(Ty) > 32) {
+ ElemTy = llvm::Type::getInt64Ty(getVMContext());
+ SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
+ } else {
+ ElemTy = llvm::Type::getInt32Ty(getVMContext());
+ SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+ }
+ std::vector<const llvm::Type*> LLVMFields;
+ LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
+ const llvm::Type* STy = llvm::StructType::get(getVMContext(), LLVMFields,
+ true);
+ return ABIArgInfo::getDirect(STy);
+}
+
+static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
+ llvm::LLVMContext &VMContext) {
+ // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
+ // is called integer-like if its size is less than or equal to one word, and
+ // the offset of each of its addressable sub-fields is zero.
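+ //
+ // For example, by this definition struct { short s; } and
+ // union { int i; char c; } are integer-like, while struct { char a, b; }
+ // is not, since 'b' is at a non-zero offset.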
+
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // Check that the type fits in a word.
+ if (Size > 32)
+ return false;
+
+ // FIXME: Handle vector types!
+ if (Ty->isVectorType())
+ return false;
+
+ // Float types are never treated as "integer like".
+ if (Ty->isRealFloatingType())
+ return false;
+
+ // If this is a builtin or pointer type then it is ok.
+ if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
+ return true;
+
+ // Small complex integer types are "integer like".
+ if (const ComplexType *CT = Ty->getAs<ComplexType>())
+ return isIntegerLikeType(CT->getElementType(), Context, VMContext);
+
+ // Single element and zero sized arrays should be allowed, by the definition
+ // above, but they are not.
+
+ // Otherwise, it must be a record type.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT) return false;
+
+ // Ignore records with flexible arrays.
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ // Check that all sub-fields are at offset 0, and are themselves "integer
+ // like".
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ bool HadField = false;
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ const FieldDecl *FD = *i;
+
+ // Bit-fields are not addressable, we only need to verify they are "integer
+ // like". We still have to disallow a subsequent non-bitfield, for example:
+ // struct { int : 0; int x; }
+ // is non-integer like according to gcc.
+ if (FD->isBitField()) {
+ if (!RD->isUnion())
+ HadField = true;
+
+ if (!isIntegerLikeType(FD->getType(), Context, VMContext))
+ return false;
+
+ continue;
+ }
+
+ // Check if this field is at offset 0.
+ if (Layout.getFieldOffset(idx) != 0)
+ return false;
+
+ if (!isIntegerLikeType(FD->getType(), Context, VMContext))
+ return false;
+
+ // Only allow at most one field in a structure. This doesn't match the
+ // wording above, but follows gcc in situations with a field following an
+ // empty structure.
+ if (!RD->isUnion()) {
+ if (HadField)
+ return false;
+
+ HadField = true;
+ }
+ }
+
+ return true;
+}
+
+ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (!isAggregateTypeForABI(RetTy)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ // Are we following APCS?
+ if (getABIKind() == APCS) {
+ if (isEmptyRecord(getContext(), RetTy, false))
+ return ABIArgInfo::getIgnore();
+
+ // Complex types are all returned as packed integers.
+ //
+ // FIXME: Consider using 2 x vector types if the back end handles them
+ // correctly.
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ getContext().getTypeSize(RetTy)));
+
+ // Integer like structures are returned in r0.
+ if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
+ // Return in the smallest viable integer type.
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size <= 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+ if (Size <= 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ }
+
+ // Otherwise return in memory.
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ // Otherwise this is an AAPCS variant.
+
+ if (isEmptyRecord(getContext(), RetTy, true))
+ return ABIArgInfo::getIgnore();
+
+ // Aggregates <= 4 bytes are returned in r0; other aggregates
+ // are returned indirectly.
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size <= 32) {
+ // Return in the smallest viable integer type.
+ if (Size <= 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+ if (Size <= 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ }
+
+ return ABIArgInfo::getIndirect(0);
+}
+
+llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // FIXME: Need to handle alignment
+ const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
+
+ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy))
+ return ABIArgInfo::getIndirect(0);
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+//===----------------------------------------------------------------------===//
+// SystemZ ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class SystemZABIInfo : public ABIInfo {
+public:
+ SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ bool isPromotableIntegerType(QualType Ty) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
+};
+
+}
+
+bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
+ // SystemZ ABI requires all 8, 16 and 32 bit quantities to be extended.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Bool:
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ return true;
+ default:
+ return false;
+ }
+ return false;
+}
+
+llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // FIXME: Implement
+ return 0;
+}
+
+
+ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+ if (isAggregateTypeForABI(RetTy))
+ return ABIArgInfo::getIndirect(0);
+
+ return (isPromotableIntegerType(RetTy) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
+ if (isAggregateTypeForABI(Ty))
+ return ABIArgInfo::getIndirect(0);
+
+ return (isPromotableIntegerType(Ty) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+//===----------------------------------------------------------------------===//
+// MSP430 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
+ void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const;
+};
+
+}
+
+void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
+ // Handle 'interrupt' attribute:
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ // Step 1: Set ISR calling convention.
+ F->setCallingConv(llvm::CallingConv::MSP430_INTR);
+
+ // Step 2: Add attributes goodness.
+ F->addFnAttr(llvm::Attribute::NoInline);
+
+ // Step 3: Emit ISR vector alias.
+ unsigned Num = attr->getNumber() + 0xffe0;
+ new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
+ "vector_" +
+ llvm::LowercaseString(llvm::utohexstr(Num)),
+ GV, &M.getModule());
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// MIPS ABI Implementation. This works for both little-endian and
+// big-endian variants.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ MIPSTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
+ return 29;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const;
+};
+}
+
+bool
+MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ // This information comes from gcc's implementation, which seems to be
+ // as canonical as it gets.
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+ llvm::LLVMContext &Context = CGF.getLLVMContext();
+
+ // Everything on MIPS is 4 bytes. Double-precision FP registers
+ // are aliased to pairs of single-precision FP registers.
+ const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+
+ // 0-31 are the general purpose registers, $0 - $31.
+ // 32-63 are the floating-point registers, $f0 - $f31.
+ // 64 and 65 are the multiply/divide registers, $hi and $lo.
+ // 66 is the (notional, I think) register for signal-handler return.
+ AssignToArrayRange(Builder, Address, Four8, 0, 65);
+
+ // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
+ // They are one bit wide and ignored here.
+
+ // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
+ // (coprocessor 1 is the FP unit)
+ // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
+ // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
+ // 176-181 are the DSP accumulator registers.
+ AssignToArrayRange(Builder, Address, Four8, 80, 181);
+
+ return false;
+}
+
+
+const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
+ if (TheTargetCodeGenInfo)
+ return *TheTargetCodeGenInfo;
+
+ // For now we just cache the TargetCodeGenInfo in CodeGenModule and don't
+ // free it.
+
+ const llvm::Triple &Triple = getContext().Target.getTriple();
+ switch (Triple.getArch()) {
+ default:
+ return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));
+
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types));
+
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ // FIXME: We want to know the float calling convention as well.
+ if (strcmp(getContext().Target.getABI(), "apcs-gnu") == 0)
+ return *(TheTargetCodeGenInfo =
+ new ARMTargetCodeGenInfo(Types, ARMABIInfo::APCS));
+
+ return *(TheTargetCodeGenInfo =
+ new ARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS));
+
+ case llvm::Triple::pic16:
+ return *(TheTargetCodeGenInfo = new PIC16TargetCodeGenInfo(Types));
+
+ case llvm::Triple::ppc:
+ return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
+
+ case llvm::Triple::systemz:
+ return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));
+
+ case llvm::Triple::msp430:
+ return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
+
+ case llvm::Triple::x86:
+ switch (Triple.getOS()) {
+ case llvm::Triple::Darwin:
+ return *(TheTargetCodeGenInfo =
+ new X86_32TargetCodeGenInfo(Types, true, true));
+ case llvm::Triple::Cygwin:
+ case llvm::Triple::MinGW32:
+ case llvm::Triple::AuroraUX:
+ case llvm::Triple::DragonFly:
+ case llvm::Triple::FreeBSD:
+ case llvm::Triple::OpenBSD:
+ return *(TheTargetCodeGenInfo =
+ new X86_32TargetCodeGenInfo(Types, false, true));
+
+ default:
+ return *(TheTargetCodeGenInfo =
+ new X86_32TargetCodeGenInfo(Types, false, false));
+ }
+
+ case llvm::Triple::x86_64:
+ switch (Triple.getOS()) {
+ case llvm::Triple::Win32:
+ case llvm::Triple::MinGW64:
+ case llvm::Triple::Cygwin:
+ return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
+ default:
+ return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types));
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h
new file mode 100644
index 0000000..9d4cf16
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h
@@ -0,0 +1,108 @@
+//===---- TargetInfo.h - Encapsulate target details -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliancy.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_TARGETINFO_H
+#define CLANG_CODEGEN_TARGETINFO_H
+
+namespace llvm {
+ class GlobalValue;
+ class Value;
+}
+
+namespace clang {
+ class ABIInfo;
+ class Decl;
+
+ namespace CodeGen {
+ class CodeGenModule;
+ class CodeGenFunction;
+ }
+
+ /// TargetCodeGenInfo - This class organizes various target-specific
+ /// code generation issues, like target-specific attributes, builtins and so
+ /// on.
+ class TargetCodeGenInfo {
+ ABIInfo *Info;
+ public:
+ // WARNING: Acquires the ownership of ABIInfo.
+ TargetCodeGenInfo(ABIInfo *info = 0):Info(info) { }
+ virtual ~TargetCodeGenInfo();
+
+ /// getABIInfo() - Returns ABI info helper for the target.
+ const ABIInfo& getABIInfo() const { return *Info; }
+
+ /// SetTargetAttributes - Provides a convenient hook to handle extra
+ /// target-specific attributes for the given global.
+ virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const { }
+
+ /// Determines the size of struct _Unwind_Exception on this platform,
+ /// in 8-bit units. The Itanium ABI defines this as:
+ /// struct _Unwind_Exception {
+ /// uint64 exception_class;
+ /// _Unwind_Exception_Cleanup_Fn exception_cleanup;
+ /// uint64 private_1;
+ /// uint64 private_2;
+ /// };
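+ /// Assuming 8-byte members (as on LP64 targets), that works out to
+ /// 4 * 8 = 32 bytes.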
+ unsigned getSizeOfUnwindException() const { return 32; }
+
+ /// Controls whether __builtin_extend_pointer should sign-extend
+ /// pointers to uint64_t or zero-extend them (the default). Has
+ /// no effect for targets:
+ /// - that have 64-bit pointers, or
+ /// - that cannot address through registers larger than pointers, or
+ /// - that implicitly ignore/truncate the top bits when addressing
+ /// through such registers.
+ virtual bool extendPointerWithSExt() const { return false; }
+
+ /// Determines the DWARF register number for the stack pointer, for
+ /// exception-handling purposes. Implements __builtin_dwarf_sp_column.
+ ///
+ /// Returns -1 if the operation is unsupported by this target.
+ virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ return -1;
+ }
+
+ /// Initializes the given DWARF EH register-size table, a char*.
+ /// Implements __builtin_init_dwarf_reg_size_table.
+ ///
+ /// Returns true if the operation is unsupported by this target.
+ virtual bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return true;
+ }
+
+ /// Performs the code-generation required to convert a return
+ /// address as stored by the system into the actual address of the
+ /// next instruction that will be executed.
+ ///
+ /// Used by __builtin_extract_return_addr().
+ virtual llvm::Value *decodeReturnAddress(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return Address;
+ }
+
+ /// Performs the code-generation required to convert the address
+ /// of an instruction into a return address suitable for storage
+ /// by the system in a return slot.
+ ///
+ /// Used by __builtin_frob_return_addr().
+ virtual llvm::Value *encodeReturnAddress(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return Address;
+ }
+ };
+}
+
+#endif // CLANG_CODEGEN_TARGETINFO_H